summaryrefslogtreecommitdiff
path: root/deps
diff options
context:
space:
mode:
authorBen Noordhuis <info@bnoordhuis.nl>2015-03-27 12:04:12 +0100
committerChris Dickinson <christopher.s.dickinson@gmail.com>2015-04-28 14:38:16 -0700
commit36cd5fb9d27b830320e57213f5b8829ffbb93324 (patch)
treebbab4215d26f8597019135206426fccf27a3089e /deps
parentb57cc51d8d3f4ad279591ae8fa6584ee22773b97 (diff)
downloadandroid-node-v8-36cd5fb9d27b830320e57213f5b8829ffbb93324.tar.gz
android-node-v8-36cd5fb9d27b830320e57213f5b8829ffbb93324.tar.bz2
android-node-v8-36cd5fb9d27b830320e57213f5b8829ffbb93324.zip
deps: upgrade v8 to 4.2.77.13
This commit applies some secondary changes in order to make `make test` pass cleanly: * disable broken postmortem debugging in common.gypi * drop obsolete strict mode test in parallel/test-repl * drop obsolete test parallel/test-v8-features PR-URL: https://github.com/iojs/io.js/pull/1232 Reviewed-By: Fedor Indutny <fedor@indutny.com>
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.gitignore1
-rw-r--r--deps/v8/.ycm_extra_conf.py193
-rw-r--r--deps/v8/AUTHORS84
-rw-r--r--deps/v8/BUILD.gn389
-rw-r--r--deps/v8/ChangeLog530
-rw-r--r--deps/v8/DEPS21
-rw-r--r--deps/v8/Makefile85
-rw-r--r--deps/v8/OWNERS1
-rw-r--r--deps/v8/PRESUBMIT.py18
-rw-r--r--deps/v8/build/features.gypi8
-rw-r--r--deps/v8/build/mac/asan.gyp31
-rw-r--r--deps/v8/build/standalone.gypi119
-rw-r--r--deps/v8/build/toolchain.gypi271
-rw-r--r--deps/v8/codereview.settings1
-rw-r--r--deps/v8/include/v8-util.h256
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h296
-rw-r--r--deps/v8/include/v8config.h6
-rw-r--r--deps/v8/samples/shell.cc1
-rw-r--r--deps/v8/src/accessors.cc77
-rw-r--r--deps/v8/src/accessors.h5
-rw-r--r--deps/v8/src/allocation-tracker.cc4
-rw-r--r--deps/v8/src/allocation.cc10
-rw-r--r--deps/v8/src/allocation.h3
-rw-r--r--deps/v8/src/api-natives.cc588
-rw-r--r--deps/v8/src/api-natives.h53
-rw-r--r--deps/v8/src/api.cc851
-rw-r--r--deps/v8/src/api.h101
-rw-r--r--deps/v8/src/apinatives.js119
-rw-r--r--deps/v8/src/arguments.h4
-rw-r--r--deps/v8/src/arm/assembler-arm.cc120
-rw-r--r--deps/v8/src/arm/assembler-arm.h5
-rw-r--r--deps/v8/src/arm/builtins-arm.cc155
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc401
-rw-r--r--deps/v8/src/arm/constants-arm.h1
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc3
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc351
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc49
-rw-r--r--deps/v8/src/arm/lithium-arm.cc32
-rw-r--r--deps/v8/src/arm/lithium-arm.h15
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc283
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h18
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc169
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h22
-rw-r--r--deps/v8/src/arm/regexp-macro-assembler-arm.cc11
-rw-r--r--deps/v8/src/arm/regexp-macro-assembler-arm.h3
-rw-r--r--deps/v8/src/arm/simulator-arm.cc50
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc31
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h5
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc150
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc456
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc3
-rw-r--r--deps/v8/src/arm64/full-codegen-arm64.cc356
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc53
-rw-r--r--deps/v8/src/arm64/lithium-arm64.cc28
-rw-r--r--deps/v8/src/arm64/lithium-arm64.h14
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.cc333
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.h45
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc176
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h22
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.cc11
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.h3
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc3
-rw-r--r--deps/v8/src/array.js2
-rw-r--r--deps/v8/src/assembler.cc147
-rw-r--r--deps/v8/src/assembler.h63
-rw-r--r--deps/v8/src/ast-numbering.cc89
-rw-r--r--deps/v8/src/ast-numbering.h2
-rw-r--r--deps/v8/src/ast-this-access-visitor.cc239
-rw-r--r--deps/v8/src/ast-this-access-visitor.h34
-rw-r--r--deps/v8/src/ast-value-factory.h11
-rw-r--r--deps/v8/src/ast.cc165
-rw-r--r--deps/v8/src/ast.h301
-rw-r--r--deps/v8/src/background-parsing-task.cc16
-rw-r--r--deps/v8/src/bailout-reason.h14
-rw-r--r--deps/v8/src/base/atomicops.h4
-rw-r--r--deps/v8/src/base/atomicops_internals_ppc_gcc.h168
-rw-r--r--deps/v8/src/base/build_config.h21
-rw-r--r--deps/v8/src/base/compiler-specific.h23
-rw-r--r--deps/v8/src/base/cpu.cc90
-rw-r--r--deps/v8/src/base/cpu.h17
-rw-r--r--deps/v8/src/base/logging.cc38
-rw-r--r--deps/v8/src/base/logging.h257
-rw-r--r--deps/v8/src/base/macros.h4
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc2
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc292
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc52
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc5
-rw-r--r--deps/v8/src/base/platform/platform.h3
-rw-r--r--deps/v8/src/base/platform/time.cc8
-rw-r--r--deps/v8/src/base/platform/time.h5
-rw-r--r--deps/v8/src/base/sys-info.cc3
-rw-r--r--deps/v8/src/bit-vector.h8
-rw-r--r--deps/v8/src/bootstrapper.cc533
-rw-r--r--deps/v8/src/bootstrapper.h13
-rw-r--r--deps/v8/src/builtins.cc196
-rw-r--r--deps/v8/src/builtins.h67
-rw-r--r--deps/v8/src/checks.cc83
-rw-r--r--deps/v8/src/checks.h32
-rw-r--r--deps/v8/src/code-factory.cc35
-rw-r--r--deps/v8/src/code-factory.h11
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc211
-rw-r--r--deps/v8/src/code-stubs.cc30
-rw-r--r--deps/v8/src/code-stubs.h269
-rw-r--r--deps/v8/src/code.h3
-rw-r--r--deps/v8/src/codegen.cc18
-rw-r--r--deps/v8/src/codegen.h2
-rw-r--r--deps/v8/src/compilation-cache.cc75
-rw-r--r--deps/v8/src/compilation-cache.h28
-rw-r--r--deps/v8/src/compiler.cc395
-rw-r--r--deps/v8/src/compiler.h201
-rw-r--r--deps/v8/src/compiler/DEPS3
-rw-r--r--deps/v8/src/compiler/STYLE29
-rw-r--r--deps/v8/src/compiler/all-nodes.cc48
-rw-r--r--deps/v8/src/compiler/all-nodes.h41
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc248
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc242
-rw-r--r--deps/v8/src/compiler/arm/linkage-arm.cc22
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc253
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc151
-rw-r--r--deps/v8/src/compiler/arm64/linkage-arm64.cc22
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc1514
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h437
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc8
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.h1
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc1
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.h2
-rw-r--r--deps/v8/src/compiler/change-lowering.cc17
-rw-r--r--deps/v8/src/compiler/change-lowering.h5
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h2
-rw-r--r--deps/v8/src/compiler/code-generator.cc74
-rw-r--r--deps/v8/src/compiler/code-generator.h18
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc1
-rw-r--r--deps/v8/src/compiler/common-operator.cc166
-rw-r--r--deps/v8/src/compiler/common-operator.h21
-rw-r--r--deps/v8/src/compiler/control-builders.cc72
-rw-r--r--deps/v8/src/compiler/control-builders.h84
-rw-r--r--deps/v8/src/compiler/control-equivalence.h8
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.cc142
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.h52
-rw-r--r--deps/v8/src/compiler/control-reducer.cc265
-rw-r--r--deps/v8/src/compiler/control-reducer.h15
-rw-r--r--deps/v8/src/compiler/diamond.h4
-rw-r--r--deps/v8/src/compiler/frame.h17
-rw-r--r--deps/v8/src/compiler/graph-builder.cc279
-rw-r--r--deps/v8/src/compiler/graph-builder.h182
-rw-r--r--deps/v8/src/compiler/graph-inl.h2
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc14
-rw-r--r--deps/v8/src/compiler/graph-reducer.h7
-rw-r--r--deps/v8/src/compiler/graph-replay.cc32
-rw-r--r--deps/v8/src/compiler/graph-replay.h6
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc152
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h5
-rw-r--r--deps/v8/src/compiler/graph.cc50
-rw-r--r--deps/v8/src/compiler/graph.h82
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc109
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc165
-rw-r--r--deps/v8/src/compiler/ia32/linkage-ia32.cc22
-rw-r--r--deps/v8/src/compiler/instruction-codes.h10
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h196
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc303
-rw-r--r--deps/v8/src/compiler/instruction-selector.h85
-rw-r--r--deps/v8/src/compiler/instruction.cc143
-rw-r--r--deps/v8/src/compiler/instruction.h368
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc3
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc17
-rw-r--r--deps/v8/src/compiler/js-context-specialization.h6
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc233
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h13
-rw-r--r--deps/v8/src/compiler/js-graph.cc26
-rw-r--r--deps/v8/src/compiler/js-graph.h29
-rw-r--r--deps/v8/src/compiler/js-inlining.cc225
-rw-r--r--deps/v8/src/compiler/js-inlining.h9
-rw-r--r--deps/v8/src/compiler/js-intrinsic-builder.cc140
-rw-r--r--deps/v8/src/compiler/js-intrinsic-builder.h40
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc194
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h52
-rw-r--r--deps/v8/src/compiler/js-operator.cc39
-rw-r--r--deps/v8/src/compiler/js-operator.h15
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc104
-rw-r--r--deps/v8/src/compiler/jump-threading.cc2
-rw-r--r--deps/v8/src/compiler/linkage-impl.h41
-rw-r--r--deps/v8/src/compiler/linkage.cc179
-rw-r--r--deps/v8/src/compiler/linkage.h53
-rw-r--r--deps/v8/src/compiler/load-elimination.cc2
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc330
-rw-r--r--deps/v8/src/compiler/loop-analysis.h28
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc339
-rw-r--r--deps/v8/src/compiler/loop-peeling.h42
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc165
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h1
-rw-r--r--deps/v8/src/compiler/machine-operator.cc2
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc362
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc171
-rw-r--r--deps/v8/src/compiler/mips/linkage-mips.cc22
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc475
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc178
-rw-r--r--deps/v8/src/compiler/mips64/linkage-mips64.cc22
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc232
-rw-r--r--deps/v8/src/compiler/move-optimizer.h8
-rw-r--r--deps/v8/src/compiler/node-aux-data-inl.h43
-rw-r--r--deps/v8/src/compiler/node-aux-data.h25
-rw-r--r--deps/v8/src/compiler/node-cache.cc4
-rw-r--r--deps/v8/src/compiler/node-marker.cc40
-rw-r--r--deps/v8/src/compiler/node-marker.h62
-rw-r--r--deps/v8/src/compiler/node-properties-inl.h226
-rw-r--r--deps/v8/src/compiler/node-properties.cc247
-rw-r--r--deps/v8/src/compiler/node-properties.h141
-rw-r--r--deps/v8/src/compiler/node.cc236
-rw-r--r--deps/v8/src/compiler/node.h547
-rw-r--r--deps/v8/src/compiler/opcodes.cc2
-rw-r--r--deps/v8/src/compiler/opcodes.h91
-rw-r--r--deps/v8/src/compiler/operator-properties.cc18
-rw-r--r--deps/v8/src/compiler/operator.cc13
-rw-r--r--deps/v8/src/compiler/operator.h22
-rw-r--r--deps/v8/src/compiler/osr.cc286
-rw-r--r--deps/v8/src/compiler/osr.h127
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc2
-rw-r--r--deps/v8/src/compiler/pipeline.cc582
-rw-r--r--deps/v8/src/compiler/pipeline.h16
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc1363
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h125
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc1383
-rw-r--r--deps/v8/src/compiler/ppc/linkage-ppc.cc69
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc40
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h12
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc548
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h7
-rw-r--r--deps/v8/src/compiler/register-allocator.cc209
-rw-r--r--deps/v8/src/compiler/register-allocator.h62
-rw-r--r--deps/v8/src/compiler/register-configuration.cc1
-rw-r--r--deps/v8/src/compiler/register-configuration.h2
-rw-r--r--deps/v8/src/compiler/representation-change.h41
-rw-r--r--deps/v8/src/compiler/schedule.cc90
-rw-r--r--deps/v8/src/compiler/schedule.h76
-rw-r--r--deps/v8/src/compiler/scheduler.cc370
-rw-r--r--deps/v8/src/compiler/scheduler.h20
-rw-r--r--deps/v8/src/compiler/select-lowering.cc2
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc155
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h14
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc3
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h1
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc55
-rw-r--r--deps/v8/src/compiler/simplified-operator.h2
-rw-r--r--deps/v8/src/compiler/source-position.cc4
-rw-r--r--deps/v8/src/compiler/typer.cc388
-rw-r--r--deps/v8/src/compiler/typer.h11
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.cc4
-rw-r--r--deps/v8/src/compiler/verifier.cc175
-rw-r--r--deps/v8/src/compiler/verifier.h2
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc139
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc189
-rw-r--r--deps/v8/src/compiler/x64/linkage-x64.cc22
-rw-r--r--deps/v8/src/compiler/zone-pool.cc9
-rw-r--r--deps/v8/src/compiler/zone-pool.h5
-rw-r--r--deps/v8/src/contexts.cc4
-rw-r--r--deps/v8/src/contexts.h25
-rw-r--r--deps/v8/src/counters.cc22
-rw-r--r--deps/v8/src/counters.h153
-rw-r--r--deps/v8/src/cpu-profiler-inl.h6
-rw-r--r--deps/v8/src/cpu-profiler.cc27
-rw-r--r--deps/v8/src/cpu-profiler.h26
-rw-r--r--deps/v8/src/d8.cc272
-rw-r--r--deps/v8/src/d8.gyp4
-rw-r--r--deps/v8/src/d8.h16
-rw-r--r--deps/v8/src/date.cc4
-rw-r--r--deps/v8/src/date.js6
-rw-r--r--deps/v8/src/debug.cc8
-rw-r--r--deps/v8/src/deoptimizer.cc106
-rw-r--r--deps/v8/src/deoptimizer.h116
-rw-r--r--deps/v8/src/disassembler.cc29
-rw-r--r--deps/v8/src/elements.cc118
-rw-r--r--deps/v8/src/elements.h16
-rw-r--r--deps/v8/src/execution.cc107
-rw-r--r--deps/v8/src/execution.h6
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc141
-rw-r--r--deps/v8/src/factory.cc250
-rw-r--r--deps/v8/src/factory.h25
-rw-r--r--deps/v8/src/field-index-inl.h13
-rw-r--r--deps/v8/src/flag-definitions.h79
-rw-r--r--deps/v8/src/flags.cc3
-rw-r--r--deps/v8/src/frames-inl.h2
-rw-r--r--deps/v8/src/frames.cc4
-rw-r--r--deps/v8/src/full-codegen.cc118
-rw-r--r--deps/v8/src/full-codegen.h41
-rw-r--r--deps/v8/src/gdb-jit.cc6
-rw-r--r--deps/v8/src/global-handles.cc193
-rw-r--r--deps/v8/src/global-handles.h41
-rw-r--r--deps/v8/src/globals.h126
-rw-r--r--deps/v8/src/harmony-array.js42
-rw-r--r--deps/v8/src/harmony-classes.js35
-rw-r--r--deps/v8/src/harmony-tostring.js19
-rw-r--r--deps/v8/src/heap-snapshot-generator.cc24
-rw-r--r--deps/v8/src/heap/gc-tracer.cc16
-rw-r--r--deps/v8/src/heap/gc-tracer.h11
-rw-r--r--deps/v8/src/heap/heap-inl.h21
-rw-r--r--deps/v8/src/heap/heap.cc264
-rw-r--r--deps/v8/src/heap/heap.h265
-rw-r--r--deps/v8/src/heap/incremental-marking.cc37
-rw-r--r--deps/v8/src/heap/incremental-marking.h17
-rw-r--r--deps/v8/src/heap/mark-compact.cc367
-rw-r--r--deps/v8/src/heap/mark-compact.h20
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h43
-rw-r--r--deps/v8/src/heap/objects-visiting.cc9
-rw-r--r--deps/v8/src/heap/spaces-inl.h14
-rw-r--r--deps/v8/src/heap/spaces.cc216
-rw-r--r--deps/v8/src/heap/spaces.h91
-rw-r--r--deps/v8/src/heap/store-buffer.cc28
-rw-r--r--deps/v8/src/hydrogen-bch.cc6
-rw-r--r--deps/v8/src/hydrogen-check-elimination.cc6
-rw-r--r--deps/v8/src/hydrogen-dce.cc3
-rw-r--r--deps/v8/src/hydrogen-escape-analysis.cc8
-rw-r--r--deps/v8/src/hydrogen-gvn.h1
-rw-r--r--deps/v8/src/hydrogen-instructions.cc370
-rw-r--r--deps/v8/src/hydrogen-instructions.h570
-rw-r--r--deps/v8/src/hydrogen-types.cc3
-rw-r--r--deps/v8/src/hydrogen-types.h33
-rw-r--r--deps/v8/src/hydrogen.cc728
-rw-r--r--deps/v8/src/hydrogen.h326
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc140
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h32
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc155
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc488
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc6
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc4
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc344
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc47
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc373
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h21
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc32
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h16
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc219
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h26
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.cc11
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.h3
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc149
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc174
-rw-r--r--deps/v8/src/ic/arm/ic-compiler-arm.cc17
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc151
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc195
-rw-r--r--deps/v8/src/ic/arm64/ic-compiler-arm64.cc17
-rw-r--r--deps/v8/src/ic/call-optimization.cc32
-rw-r--r--deps/v8/src/ic/call-optimization.h7
-rw-r--r--deps/v8/src/ic/handler-compiler.cc119
-rw-r--r--deps/v8/src/ic/handler-compiler.h81
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc166
-rw-r--r--deps/v8/src/ic/ia32/ic-compiler-ia32.cc17
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc166
-rw-r--r--deps/v8/src/ic/ic-compiler.cc61
-rw-r--r--deps/v8/src/ic/ic-compiler.h21
-rw-r--r--deps/v8/src/ic/ic-inl.h42
-rw-r--r--deps/v8/src/ic/ic-state.cc348
-rw-r--r--deps/v8/src/ic/ic-state.h27
-rw-r--r--deps/v8/src/ic/ic.cc549
-rw-r--r--deps/v8/src/ic/ic.h116
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc158
-rw-r--r--deps/v8/src/ic/mips/ic-compiler-mips.cc17
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc174
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc158
-rw-r--r--deps/v8/src/ic/mips64/ic-compiler-mips64.cc17
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc181
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc253
-rw-r--r--deps/v8/src/ic/ppc/ic-compiler-ppc.cc29
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc230
-rw-r--r--deps/v8/src/ic/ppc/stub-cache-ppc.cc78
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc158
-rw-r--r--deps/v8/src/ic/x64/ic-compiler-x64.cc17
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc166
-rw-r--r--deps/v8/src/ic/x64/stub-cache-x64.cc2
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc166
-rw-r--r--deps/v8/src/ic/x87/ic-compiler-x87.cc17
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc168
-rw-r--r--deps/v8/src/interface-descriptors.h32
-rw-r--r--deps/v8/src/interface.cc266
-rw-r--r--deps/v8/src/interface.h215
-rw-r--r--deps/v8/src/isolate-inl.h5
-rw-r--r--deps/v8/src/isolate.cc120
-rw-r--r--deps/v8/src/isolate.h62
-rw-r--r--deps/v8/src/json-parser.h12
-rw-r--r--deps/v8/src/json-stringifier.h2
-rw-r--r--deps/v8/src/jsregexp.cc136
-rw-r--r--deps/v8/src/jsregexp.h26
-rw-r--r--deps/v8/src/layout-descriptor-inl.h2
-rw-r--r--deps/v8/src/layout-descriptor.cc31
-rw-r--r--deps/v8/src/layout-descriptor.h10
-rw-r--r--deps/v8/src/list.h2
-rw-r--r--deps/v8/src/lithium-allocator-inl.h2
-rw-r--r--deps/v8/src/lithium-allocator.cc7
-rw-r--r--deps/v8/src/lithium-allocator.h2
-rw-r--r--deps/v8/src/lithium-codegen.cc71
-rw-r--r--deps/v8/src/lithium-codegen.h2
-rw-r--r--deps/v8/src/lithium-inl.h2
-rw-r--r--deps/v8/src/lithium.cc55
-rw-r--r--deps/v8/src/lithium.h1
-rw-r--r--deps/v8/src/liveedit.cc21
-rw-r--r--deps/v8/src/log-utils.h8
-rw-r--r--deps/v8/src/log.cc19
-rw-r--r--deps/v8/src/log.h3
-rw-r--r--deps/v8/src/lookup-inl.h4
-rw-r--r--deps/v8/src/lookup.cc53
-rw-r--r--deps/v8/src/lookup.h23
-rw-r--r--deps/v8/src/macro-assembler.h7
-rw-r--r--deps/v8/src/macros.py18
-rw-r--r--deps/v8/src/messages.h10
-rw-r--r--deps/v8/src/messages.js22
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h4
-rw-r--r--deps/v8/src/mips/assembler-mips.cc185
-rw-r--r--deps/v8/src/mips/assembler-mips.h21
-rw-r--r--deps/v8/src/mips/builtins-mips.cc153
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc413
-rw-r--r--deps/v8/src/mips/codegen-mips.cc11
-rw-r--r--deps/v8/src/mips/constants-mips.h1
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc3
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc357
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc45
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc323
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h23
-rw-r--r--deps/v8/src/mips/lithium-mips.cc32
-rw-r--r--deps/v8/src/mips/lithium-mips.h15
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc169
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h21
-rw-r--r--deps/v8/src/mips/regexp-macro-assembler-mips.cc11
-rw-r--r--deps/v8/src/mips/regexp-macro-assembler-mips.h3
-rw-r--r--deps/v8/src/mips/simulator-mips.cc3
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h4
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc105
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h23
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc154
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc416
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc8
-rw-r--r--deps/v8/src/mips64/constants-mips64.h1
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc3
-rw-r--r--deps/v8/src/mips64/full-codegen-mips64.cc359
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc45
-rw-r--r--deps/v8/src/mips64/lithium-codegen-mips64.cc350
-rw-r--r--deps/v8/src/mips64/lithium-codegen-mips64.h23
-rw-r--r--deps/v8/src/mips64/lithium-mips64.cc32
-rw-r--r--deps/v8/src/mips64/lithium-mips64.h14
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc175
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h21
-rw-r--r--deps/v8/src/mips64/regexp-macro-assembler-mips64.cc11
-rw-r--r--deps/v8/src/mips64/regexp-macro-assembler-mips64.h3
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc5
-rw-r--r--deps/v8/src/mirror-debugger.js28
-rw-r--r--deps/v8/src/mksnapshot.cc32
-rw-r--r--deps/v8/src/modules.cc38
-rw-r--r--deps/v8/src/modules.h99
-rw-r--r--deps/v8/src/objects-debug.cc129
-rw-r--r--deps/v8/src/objects-inl.h333
-rw-r--r--deps/v8/src/objects-printer.cc51
-rw-r--r--deps/v8/src/objects.cc2476
-rw-r--r--deps/v8/src/objects.h670
-rw-r--r--deps/v8/src/optimizing-compiler-thread.cc35
-rw-r--r--deps/v8/src/ostreams.cc19
-rw-r--r--deps/v8/src/ostreams.h24
-rw-r--r--deps/v8/src/parser.cc1680
-rw-r--r--deps/v8/src/parser.h183
-rw-r--r--deps/v8/src/perf-jit.cc2
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc191
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h86
-rw-r--r--deps/v8/src/ppc/builtins-ppc.cc151
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc728
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.h8
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc95
-rw-r--r--deps/v8/src/ppc/constants-ppc.h54
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc11
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc145
-rw-r--r--deps/v8/src/ppc/full-codegen-ppc.cc360
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc54
-rw-r--r--deps/v8/src/ppc/lithium-codegen-ppc.cc757
-rw-r--r--deps/v8/src/ppc/lithium-codegen-ppc.h12
-rw-r--r--deps/v8/src/ppc/lithium-ppc.cc94
-rw-r--r--deps/v8/src/ppc/lithium-ppc.h35
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc378
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h89
-rw-r--r--deps/v8/src/ppc/regexp-macro-assembler-ppc.cc12
-rw-r--r--deps/v8/src/ppc/regexp-macro-assembler-ppc.h3
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc387
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h20
-rw-r--r--deps/v8/src/preparse-data-format.h2
-rw-r--r--deps/v8/src/preparse-data.cc3
-rw-r--r--deps/v8/src/preparse-data.h41
-rw-r--r--deps/v8/src/preparser.cc299
-rw-r--r--deps/v8/src/preparser.h869
-rw-r--r--deps/v8/src/prettyprinter.cc441
-rw-r--r--deps/v8/src/prettyprinter.h43
-rw-r--r--deps/v8/src/profile-generator-inl.h2
-rw-r--r--deps/v8/src/profile-generator.cc56
-rw-r--r--deps/v8/src/profile-generator.h32
-rw-r--r--deps/v8/src/property-details.h21
-rw-r--r--deps/v8/src/property.cc35
-rw-r--r--deps/v8/src/property.h177
-rw-r--r--deps/v8/src/regexp-macro-assembler-irregexp.cc7
-rw-r--r--deps/v8/src/regexp-macro-assembler-irregexp.h3
-rw-r--r--deps/v8/src/regexp-macro-assembler-tracer.cc5
-rw-r--r--deps/v8/src/regexp-macro-assembler-tracer.h2
-rw-r--r--deps/v8/src/regexp-macro-assembler.cc16
-rw-r--r--deps/v8/src/regexp-macro-assembler.h7
-rw-r--r--deps/v8/src/regexp.js3
-rw-r--r--deps/v8/src/rewriter.cc8
-rw-r--r--deps/v8/src/runtime-profiler.cc8
-rw-r--r--deps/v8/src/runtime.js18
-rw-r--r--deps/v8/src/runtime/runtime-api.cc127
-rw-r--r--deps/v8/src/runtime/runtime-array.cc50
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc149
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc58
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc51
-rw-r--r--deps/v8/src/runtime/runtime-date.cc2
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc16
-rw-r--r--deps/v8/src/runtime/runtime-function.cc45
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc9
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc33
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc20
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc5
-rw-r--r--deps/v8/src/runtime/runtime-object.cc197
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc14
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc86
-rw-r--r--deps/v8/src/runtime/runtime-test.cc56
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc2
-rw-r--r--deps/v8/src/runtime/runtime-utils.h23
-rw-r--r--deps/v8/src/runtime/runtime.cc4
-rw-r--r--deps/v8/src/runtime/runtime.h46
-rw-r--r--deps/v8/src/safepoint-table.cc1
-rw-r--r--deps/v8/src/sampler.cc20
-rw-r--r--deps/v8/src/scanner-character-streams.cc87
-rw-r--r--deps/v8/src/scanner-character-streams.h53
-rw-r--r--deps/v8/src/scanner.cc11
-rw-r--r--deps/v8/src/scanner.h19
-rw-r--r--deps/v8/src/scopeinfo.cc41
-rw-r--r--deps/v8/src/scopeinfo.h6
-rw-r--r--deps/v8/src/scopes.cc264
-rw-r--r--deps/v8/src/scopes.h134
-rw-r--r--deps/v8/src/serialize.cc411
-rw-r--r--deps/v8/src/serialize.h182
-rw-r--r--deps/v8/src/simulator.h2
-rw-r--r--deps/v8/src/snapshot-common.cc153
-rw-r--r--deps/v8/src/snapshot-external.cc14
-rw-r--r--deps/v8/src/snapshot.h54
-rw-r--r--deps/v8/src/startup-data-util.cc91
-rw-r--r--deps/v8/src/startup-data-util.h51
-rw-r--r--deps/v8/src/string-stream.cc2
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.cc12
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.js11
-rw-r--r--deps/v8/src/token.h7
-rw-r--r--deps/v8/src/transitions.cc2
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h5
-rw-r--r--deps/v8/src/type-feedback-vector.cc87
-rw-r--r--deps/v8/src/type-feedback-vector.h24
-rw-r--r--deps/v8/src/type-info.cc21
-rw-r--r--deps/v8/src/type-info.h9
-rw-r--r--deps/v8/src/types-inl.h139
-rw-r--r--deps/v8/src/types.cc566
-rw-r--r--deps/v8/src/types.h233
-rw-r--r--deps/v8/src/typing.cc23
-rw-r--r--deps/v8/src/typing.h5
-rw-r--r--deps/v8/src/unicode-decoder.cc12
-rw-r--r--deps/v8/src/unicode-decoder.h45
-rw-r--r--deps/v8/src/unicode-inl.h2
-rw-r--r--deps/v8/src/unicode.cc4
-rw-r--r--deps/v8/src/unicode.h8
-rw-r--r--deps/v8/src/unique.h2
-rw-r--r--deps/v8/src/utils.cc4
-rw-r--r--deps/v8/src/utils.h184
-rw-r--r--deps/v8/src/v8.cc3
-rw-r--r--deps/v8/src/v8.h4
-rw-r--r--deps/v8/src/v8natives.js9
-rw-r--r--deps/v8/src/variables.cc6
-rw-r--r--deps/v8/src/variables.h15
-rw-r--r--deps/v8/src/vector.h4
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h9
-rw-r--r--deps/v8/src/x64/assembler-x64.cc190
-rw-r--r--deps/v8/src/x64/assembler-x64.h26
-rw-r--r--deps/v8/src/x64/builtins-x64.cc149
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc472
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc2
-rw-r--r--deps/v8/src/x64/disasm-x64.cc6
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc341
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc47
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc359
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h19
-rw-r--r--deps/v8/src/x64/lithium-x64.cc29
-rw-r--r--deps/v8/src/x64/lithium-x64.h25
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc262
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h24
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.cc11
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.h3
-rw-r--r--deps/v8/src/x87/assembler-x87.cc71
-rw-r--r--deps/v8/src/x87/assembler-x87.h32
-rw-r--r--deps/v8/src/x87/builtins-x87.cc155
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc488
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc6
-rw-r--r--deps/v8/src/x87/disasm-x87.cc4
-rw-r--r--deps/v8/src/x87/full-codegen-x87.cc344
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc47
-rw-r--r--deps/v8/src/x87/lithium-codegen-x87.cc385
-rw-r--r--deps/v8/src/x87/lithium-codegen-x87.h20
-rw-r--r--deps/v8/src/x87/lithium-x87.cc26
-rw-r--r--deps/v8/src/x87/lithium-x87.h16
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc212
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h25
-rw-r--r--deps/v8/src/x87/regexp-macro-assembler-x87.cc11
-rw-r--r--deps/v8/src/x87/regexp-macro-assembler-x87.h3
-rw-r--r--deps/v8/src/zone-containers.h31
-rw-r--r--deps/v8/src/zone-inl.h70
-rw-r--r--deps/v8/src/zone.cc141
-rw-r--r--deps/v8/src/zone.h104
-rw-r--r--deps/v8/test/benchmarks/testcfg.py2
-rw-r--r--deps/v8/test/cctest/cctest.cc7
-rw-r--r--deps/v8/test/cctest/cctest.gyp28
-rw-r--r--deps/v8/test/cctest/cctest.h62
-rw-r--r--deps/v8/test/cctest/cctest.status45
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h5
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc10
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h16
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h6
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.cc12
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h12
-rw-r--r--deps/v8/test/cctest/compiler/simplified-graph-builder.cc4
-rw-r--r--deps/v8/test/cctest/compiler/simplified-graph-builder.h4
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-changes-lowering.cc15
-rw-r--r--deps/v8/test/cctest/compiler/test-codegen-deopt.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-control-reducer.cc259
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc10
-rw-r--r--deps/v8/test/cctest/compiler/test-graph-reducer.cc622
-rw-r--r--deps/v8/test/cctest/compiler/test-graph-visualizer.cc13
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc113
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc26
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc17
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc81
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc24
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc34
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc165
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc146
-rw-r--r--deps/v8/test/cctest/compiler/test-node-cache.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc216
-rw-r--r--deps/v8/test/cctest/compiler/test-operator.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-osr.cc486
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc24
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsbranches.cc27
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsexceptions.cc115
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsops.cc23
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc335
-rw-r--r--deps/v8/test/cctest/compiler/test-schedule.cc177
-rw-r--r--deps/v8/test/cctest/compiler/test-scheduler.cc2124
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc66
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h6
-rw-r--r--deps/v8/test/cctest/test-accessors.cc18
-rw-r--r--deps/v8/test/cctest/test-alloc.cc4
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc3110
-rw-r--r--deps/v8/test/cctest/test-api.cc5647
-rw-r--r--deps/v8/test/cctest/test-api.h34
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc224
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc8
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc106
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc364
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc333
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc1060
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc151
-rw-r--r--deps/v8/test/cctest/test-assembler-x87.cc96
-rw-r--r--deps/v8/test/cctest/test-ast.cc3
-rw-r--r--deps/v8/test/cctest/test-bignum-dtoa.cc72
-rw-r--r--deps/v8/test/cctest/test-bignum.cc620
-rw-r--r--deps/v8/test/cctest/test-bit-vector.cc2
-rw-r--r--deps/v8/test/cctest/test-checks.cc26
-rw-r--r--deps/v8/test/cctest/test-circular-queue.cc42
-rw-r--r--deps/v8/test/cctest/test-code-stubs.cc33
-rw-r--r--deps/v8/test/cctest/test-compiler.cc122
-rw-r--r--deps/v8/test/cctest/test-conversions.cc16
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc511
-rw-r--r--deps/v8/test/cctest/test-debug.cc25
-rw-r--r--deps/v8/test/cctest/test-declarative-accessors.cc302
-rw-r--r--deps/v8/test/cctest/test-decls.cc32
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-ppc.cc155
-rw-r--r--deps/v8/test/cctest/test-double.cc4
-rw-r--r--deps/v8/test/cctest/test-dtoa.cc74
-rw-r--r--deps/v8/test/cctest/test-fast-dtoa.cc48
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc98
-rw-r--r--deps/v8/test/cctest/test-fixed-dtoa.cc220
-rw-r--r--deps/v8/test/cctest/test-flags.cc2
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc2
-rw-r--r--deps/v8/test/cctest/test-global-object.cc2
-rw-r--r--deps/v8/test/cctest/test-hashing.cc8
-rw-r--r--deps/v8/test/cctest/test-hashmap.cc16
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc537
-rw-r--r--deps/v8/test/cctest/test-heap.cc128
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc2
-rw-r--r--deps/v8/test/cctest/test-lockers.cc45
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc8
-rw-r--r--deps/v8/test/cctest/test-log.cc17
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc2
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc3
-rw-r--r--deps/v8/test/cctest/test-migrations.cc2032
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc5
-rw-r--r--deps/v8/test/cctest/test-parsing.cc1386
-rw-r--r--deps/v8/test/cctest/test-platform.cc7
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc272
-rw-r--r--deps/v8/test/cctest/test-regexp.cc174
-rw-r--r--deps/v8/test/cctest/test-reloc-info.cc7
-rw-r--r--deps/v8/test/cctest/test-sampler-api.cc12
-rw-r--r--deps/v8/test/cctest/test-serialize.cc443
-rw-r--r--deps/v8/test/cctest/test-spaces.cc18
-rw-r--r--deps/v8/test/cctest/test-strings.cc18
-rw-r--r--deps/v8/test/cctest/test-symbols.cc2
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc59
-rw-r--r--deps/v8/test/cctest/test-transitions.cc29
-rw-r--r--deps/v8/test/cctest/test-types.cc269
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc218
-rw-r--r--deps/v8/test/cctest/test-unique.cc24
-rw-r--r--deps/v8/test/cctest/test-utils.cc2
-rw-r--r--deps/v8/test/cctest/test-version.cc8
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc30
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc25
-rw-r--r--deps/v8/test/cctest/trace-extension.cc17
-rw-r--r--deps/v8/test/cctest/types-fuzz.h34
-rw-r--r--deps/v8/test/js-perf-test/Classes/super.js42
-rw-r--r--deps/v8/test/message/super-constructor-extra-statement.out10
-rw-r--r--deps/v8/test/message/super-constructor.out7
-rw-r--r--deps/v8/test/message/super-in-function.js10
-rw-r--r--deps/v8/test/message/super-in-function.out7
-rw-r--r--deps/v8/test/mjsunit/accessors-no-prototype.js51
-rw-r--r--deps/v8/test/mjsunit/array-push12.js23
-rw-r--r--deps/v8/test/mjsunit/asm/int32modb.js26
-rw-r--r--deps/v8/test/mjsunit/asm/redundancy1.js26
-rw-r--r--deps/v8/test/mjsunit/asm/redundancy2.js29
-rw-r--r--deps/v8/test/mjsunit/asm/switch.js120
-rw-r--r--deps/v8/test/mjsunit/big-array-literal.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-next-call.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-for-in.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-alignment.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-backedges1.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-func.js27
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-id.js40
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope.js116
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-follow.js61
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-for-let.js82
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-forin.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-forof.js35
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-function-id.js33
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-function-id2.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-function.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual1.js35
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual2.js35
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-maze1.js51
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-maze2.js63
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-multiple.js44
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-multiple2.js51
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-multiple3.js57
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested2.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested2b.js25
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested3.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested3b.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-regex-id.js54
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-sar.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-simple.js34
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-top1.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-top2.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-top3.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-warm.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-while-let.js58
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-3812.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-416359.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-445907.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-446647.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-447567.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-451012.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-452427.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-to-number-binop-deopt.js25
-rw-r--r--deps/v8/test/mjsunit/count-based-osr.js5
-rw-r--r--deps/v8/test/mjsunit/d8-os.js2
-rw-r--r--deps/v8/test/mjsunit/debug-script.js6
-rw-r--r--deps/v8/test/mjsunit/es6/array-tostring.js157
-rw-r--r--deps/v8/test/mjsunit/es6/iteration-syntax.js48
-rw-r--r--deps/v8/test/mjsunit/es6/object-tostring.js14
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/array-from.js54
-rw-r--r--deps/v8/test/mjsunit/harmony/array-of.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/class-computed-property-names-super.js76
-rw-r--r--deps/v8/test/mjsunit/harmony/class-property-name-eval-arguments.js79
-rw-r--r--deps/v8/test/mjsunit/harmony/classes-experimental.js339
-rw-r--r--deps/v8/test/mjsunit/harmony/classes-lazy-parsing.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/classes-subclass-arrays.js150
-rw-r--r--deps/v8/test/mjsunit/harmony/classes.js76
-rw-r--r--deps/v8/test/mjsunit/harmony/computed-property-names-classes.js390
-rw-r--r--deps/v8/test/mjsunit/harmony/computed-property-names-object-literals-methods.js121
-rw-r--r--deps/v8/test/mjsunit/harmony/computed-property-names-super.js79
-rw-r--r--deps/v8/test/mjsunit/harmony/computed-property-names.js279
-rw-r--r--deps/v8/test/mjsunit/harmony/method-name-eval-arguments.js35
-rw-r--r--deps/v8/test/mjsunit/harmony/module-parsing-eval.js (renamed from deps/v8/test/mjsunit/regress/regress-1145.js)35
-rw-r--r--deps/v8/test/mjsunit/harmony/module-parsing.js190
-rw-r--r--deps/v8/test/mjsunit/harmony/modules.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/object-literals-method.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/object-literals-property-shorthand.js22
-rw-r--r--deps/v8/test/mjsunit/harmony/object-literals-super.js35
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-flags.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-455141.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-typedarray-out-of-bounds.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/rest-params-lazy-parsing.js17
-rw-r--r--deps/v8/test/mjsunit/harmony/rest-params.js182
-rw-r--r--deps/v8/test/mjsunit/harmony/super.js2497
-rw-r--r--deps/v8/test/mjsunit/harmony/templates.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/toMethod.js23
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js212
-rw-r--r--deps/v8/test/mjsunit/mirror-object.js6
-rw-r--r--deps/v8/test/mjsunit/mirror-regexp.js4
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js28
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status63
-rw-r--r--deps/v8/test/mjsunit/object-literal-multiple-fields.js96
-rw-r--r--deps/v8/test/mjsunit/object-literal-multiple-proto-fields.js21
-rw-r--r--deps/v8/test/mjsunit/osr-elements-kind.js3
-rw-r--r--deps/v8/test/mjsunit/property-name-eval-arguments.js59
-rw-r--r--deps/v8/test/mjsunit/regress-sync-optimized-lists.js4
-rw-r--r--deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1118.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2825.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3032.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3501.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-379770.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3859.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3865.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3884.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-437713.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-444805.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-444805.js-script11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-446389.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-447526.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-447561.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-448711.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-449070.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-449291.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-450895.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-451322.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-451958.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-453481.js127
-rw-r--r--deps/v8/test/mjsunit/regress/regress-454725.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-455212.js (renamed from deps/v8/test/mjsunit/regress/regress-634.js)13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-457935.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-458876.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-458987.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-459955.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-150545.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-387599.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-448730.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-450642.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-450960.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-451013.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-451016.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-451770.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-454091.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-455644.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-merge-descriptors.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-nan.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-nan2.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-nan3.js32
-rw-r--r--deps/v8/test/mjsunit/regress/string-set-char-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/strict-mode.js89
-rw-r--r--deps/v8/test/mjsunit/strong/classes.js17
-rw-r--r--deps/v8/test/mjsunit/strong/delete.js11
-rw-r--r--deps/v8/test/mjsunit/strong/empty-statement.js18
-rw-r--r--deps/v8/test/mjsunit/strong/equality.js10
-rw-r--r--deps/v8/test/mjsunit/strong/for-in.js17
-rw-r--r--deps/v8/test/mjsunit/strong/functions.js33
-rw-r--r--deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js25
-rw-r--r--deps/v8/test/mjsunit/strong/use-strong.js27
-rw-r--r--deps/v8/test/mjsunit/strong/var-let-const.js22
-rw-r--r--deps/v8/test/mjsunit/testcfg.py8
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test.default10
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown10
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic10
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.js8
-rw-r--r--deps/v8/test/mozilla/mozilla.status15
-rw-r--r--deps/v8/test/test262-es6/test262-es6.status19
-rw-r--r--deps/v8/test/test262/test262.status11
-rw-r--r--deps/v8/test/unittests/base/cpu-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/logging-unittest.cc19
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc18
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc79
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc59
-rw-r--r--deps/v8/test/unittests/compiler/change-lowering-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc81
-rw-r--r--deps/v8/test/unittests/compiler/control-equivalence-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc70
-rw-r--r--deps/v8/test/unittests/compiler/control-reducer-unittest.cc124
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc574
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc32
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h6
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc47
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.h12
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc113
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.h45
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc215
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc20
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc68
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc451
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc45
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/move-optimizer-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/node-properties-unittest.cc59
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc334
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h27
-rw-r--r--deps/v8/test/unittests/compiler/node-unittest.cc170
-rw-r--r--deps/v8/test/unittests/compiler/opcodes-unittest.cc122
-rw-r--r--deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/register-allocator-unittest.cc42
-rw-r--r--deps/v8/test/unittests/compiler/schedule-unittest.cc218
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc2018
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc202
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc (renamed from deps/v8/test/cctest/compiler/test-typer.cc)242
-rw-r--r--deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc187
-rw-r--r--deps/v8/test/unittests/compiler/zone-pool-unittest.cc8
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc7
-rw-r--r--deps/v8/test/unittests/test-utils.cc1
-rw-r--r--deps/v8/test/unittests/test-utils.h21
-rw-r--r--deps/v8/test/unittests/unittests.gyp18
-rw-r--r--deps/v8/test/webkit/exception-for-nonobject-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/toString-overrides-expected.txt5
-rw-r--r--deps/v8/test/webkit/object-literal-syntax-expected.txt29
-rw-r--r--deps/v8/test/webkit/object-literal-syntax.js28
-rw-r--r--deps/v8/test/webkit/run-json-stringify-expected.txt4
-rw-r--r--deps/v8/test/webkit/webkit.status6
-rw-r--r--deps/v8/testing/gmock-support.h29
-rw-r--r--deps/v8/testing/gtest-support.h19
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua80
-rwxr-xr-xdeps/v8/tools/gcmole/parallel.py44
-rwxr-xr-xdeps/v8/tools/grokdump.py55
-rw-r--r--deps/v8/tools/gyp/v8.gyp101
-rw-r--r--deps/v8/tools/logreader.js39
-rw-r--r--deps/v8/tools/ninja/ninja_output.py44
-rw-r--r--deps/v8/tools/parser-shell.cc4
-rwxr-xr-xdeps/v8/tools/presubmit.py27
-rw-r--r--deps/v8/tools/profile.js28
-rwxr-xr-xdeps/v8/tools/push-to-trunk/bump_up_version.py247
-rwxr-xr-xdeps/v8/tools/push-to-trunk/generate_version.py78
-rwxr-xr-xdeps/v8/tools/release/auto_push.py (renamed from deps/v8/tools/push-to-trunk/auto_push.py)65
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py (renamed from deps/v8/tools/push-to-trunk/auto_roll.py)34
-rwxr-xr-xdeps/v8/tools/release/auto_tag.py (renamed from deps/v8/tools/push-to-trunk/auto_tag.py)0
-rwxr-xr-xdeps/v8/tools/release/check_clusterfuzz.py (renamed from deps/v8/tools/push-to-trunk/check_clusterfuzz.py)0
-rwxr-xr-xdeps/v8/tools/release/chromium_roll.py (renamed from deps/v8/tools/push-to-trunk/chromium_roll.py)44
-rw-r--r--deps/v8/tools/release/common_includes.py (renamed from deps/v8/tools/push-to-trunk/common_includes.py)121
-rwxr-xr-xdeps/v8/tools/release/create_release.py313
-rw-r--r--deps/v8/tools/release/git_recipes.py (renamed from deps/v8/tools/push-to-trunk/git_recipes.py)0
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py (renamed from deps/v8/tools/push-to-trunk/merge_to_branch.py)26
-rwxr-xr-xdeps/v8/tools/release/push_to_candidates.py (renamed from deps/v8/tools/push-to-trunk/push_to_trunk.py)208
-rwxr-xr-xdeps/v8/tools/release/releases.py (renamed from deps/v8/tools/push-to-trunk/releases.py)165
-rwxr-xr-xdeps/v8/tools/release/script_test.py (renamed from deps/v8/tools/push-to-trunk/script_test.py)0
-rw-r--r--deps/v8/tools/release/test_scripts.py (renamed from deps/v8/tools/push-to-trunk/test_scripts.py)598
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py6
-rwxr-xr-xdeps/v8/tools/run-tests.py82
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py5
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py2
-rw-r--r--deps/v8/tools/testrunner/local/utils.py4
-rw-r--r--deps/v8/tools/testrunner/objects/context.py8
-rw-r--r--deps/v8/tools/tickprocessor-driver.js3
-rw-r--r--deps/v8/tools/tickprocessor.js16
-rw-r--r--deps/v8/tools/v8heapconst.py422
-rw-r--r--deps/v8/tools/vim/ninja-build.vim119
-rw-r--r--deps/v8/tools/whitespace.txt4
972 files changed, 68748 insertions, 40216 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index f720bee948..2eac3035c3 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -29,6 +29,7 @@
.settings
.*.sw?
bsuite
+compile_commands.json
d8
d8_g
gccauses
diff --git a/deps/v8/.ycm_extra_conf.py b/deps/v8/.ycm_extra_conf.py
new file mode 100644
index 0000000000..e065a0896b
--- /dev/null
+++ b/deps/v8/.ycm_extra_conf.py
@@ -0,0 +1,193 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Autocompletion config for YouCompleteMe in V8.
+#
+# USAGE:
+#
+# 1. Install YCM [https://github.com/Valloric/YouCompleteMe]
+# (Googlers should check out [go/ycm])
+#
+# 2. Profit
+#
+#
+# Usage notes:
+#
+# * You must use ninja & clang to build V8.
+#
+# * You must have run gyp_v8 and built V8 recently.
+#
+#
+# Hacking notes:
+#
+# * The purpose of this script is to construct an accurate enough command line
+# for YCM to pass to clang so it can build and extract the symbols.
+#
+# * Right now, we only pull the -I and -D flags. That seems to be sufficient
+# for everything I've used it for.
+#
+# * That whole ninja & clang thing? We could support other configs if someone
+# were willing to write the correct commands and a parser.
+#
+# * This has only been tested on gTrusty.
+
+
+import os
+import os.path
+import subprocess
+import sys
+
+
+# Flags from YCM's default config.
+flags = [
+'-DUSE_CLANG_COMPLETER',
+'-std=gnu++0x',
+'-x',
+'c++',
+]
+
+
+def PathExists(*args):
+ return os.path.exists(os.path.join(*args))
+
+
+def FindV8SrcFromFilename(filename):
+ """Searches for the root of the V8 checkout.
+
+ Simply checks parent directories until it finds .gclient and v8/.
+
+ Args:
+ filename: (String) Path to source file being edited.
+
+ Returns:
+ (String) Path of 'v8/', or None if unable to find.
+ """
+ curdir = os.path.normpath(os.path.dirname(filename))
+ while not (PathExists(curdir, 'v8') and PathExists(curdir, 'v8', 'DEPS')
+ and (PathExists(curdir, '.gclient')
+ or PathExists(curdir, 'v8', '.git'))):
+ nextdir = os.path.normpath(os.path.join(curdir, '..'))
+ if nextdir == curdir:
+ return None
+ curdir = nextdir
+ return os.path.join(curdir, 'v8')
+
+
+def GetClangCommandFromNinjaForFilename(v8_root, filename):
+ """Returns the command line to build |filename|.
+
+ Asks ninja how it would build the source file. If the specified file is a
+ header, tries to find its companion source file first.
+
+ Args:
+ v8_root: (String) Path to v8/.
+ filename: (String) Path to source file being edited.
+
+ Returns:
+ (List of Strings) Command line arguments for clang.
+ """
+ if not v8_root:
+ return []
+
+ # Generally, everyone benefits from including V8's root, because all of
+ # V8's includes are relative to that.
+ v8_flags = ['-I' + os.path.join(v8_root)]
+
+ # Version of Clang used to compile V8 can be newer then version of
+ # libclang that YCM uses for completion. So it's possible that YCM's libclang
+ # doesn't know about some used warning options, which causes compilation
+ # warnings (and errors, because of '-Werror');
+ v8_flags.append('-Wno-unknown-warning-option')
+
+ # Header files can't be built. Instead, try to match a header file to its
+ # corresponding source file.
+ if filename.endswith('.h'):
+ alternates = ['.cc', '.cpp']
+ for alt_extension in alternates:
+ alt_name = filename[:-2] + alt_extension
+ if os.path.exists(alt_name):
+ filename = alt_name
+ break
+ else:
+ if filename.endswith('-inl.h'):
+ for alt_extension in alternates:
+ alt_name = filename[:-6] + alt_extension
+ if os.path.exists(alt_name):
+ filename = alt_name
+ break;
+ else:
+ # If this is a standalone -inl.h file with no source, the best we can
+ # do is try to use the default flags.
+ return v8_flags
+ else:
+ # If this is a standalone .h file with no source, the best we can do is
+ # try to use the default flags.
+ return v8_flags
+
+ sys.path.append(os.path.join(v8_root, 'tools', 'ninja'))
+ from ninja_output import GetNinjaOutputDirectory
+ out_dir = os.path.realpath(GetNinjaOutputDirectory(v8_root))
+
+ # Ninja needs the path to the source file relative to the output build
+ # directory.
+ rel_filename = os.path.relpath(os.path.realpath(filename), out_dir)
+
+ # Ask ninja how it would build our source file.
+ p = subprocess.Popen(['ninja', '-v', '-C', out_dir, '-t',
+ 'commands', rel_filename + '^'],
+ stdout=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if p.returncode:
+ return v8_flags
+
+ # Ninja might execute several commands to build something. We want the last
+ # clang command.
+ clang_line = None
+ for line in reversed(stdout.split('\n')):
+ if 'clang' in line:
+ clang_line = line
+ break
+ else:
+ return v8_flags
+
+ # Parse flags that are important for YCM's purposes.
+ for flag in clang_line.split(' '):
+ if flag.startswith('-I'):
+ # Relative paths need to be resolved, because they're relative to the
+ # output dir, not the source.
+ if flag[2] == '/':
+ v8_flags.append(flag)
+ else:
+ abs_path = os.path.normpath(os.path.join(out_dir, flag[2:]))
+ v8_flags.append('-I' + abs_path)
+ elif flag.startswith('-std'):
+ v8_flags.append(flag)
+ elif flag.startswith('-') and flag[1] in 'DWFfmO':
+ if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard':
+ # These flags causes libclang (3.3) to crash. Remove it until things
+ # are fixed.
+ continue
+ v8_flags.append(flag)
+
+ return v8_flags
+
+
+def FlagsForFile(filename):
+ """This is the main entry point for YCM. Its interface is fixed.
+
+ Args:
+ filename: (String) Path to source file being edited.
+
+ Returns:
+ (Dictionary)
+ 'flags': (List of Strings) Command line flags.
+ 'do_cache': (Boolean) True if the result should be cached.
+ """
+ v8_root = FindV8SrcFromFilename(filename)
+ v8_flags = GetClangCommandFromNinjaForFilename(v8_root, filename)
+ final_flags = flags + v8_flags
+ return {
+ 'flags': final_flags,
+ 'do_cache': True
+ }
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 6cda4f2397..1965fb18c7 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -3,78 +3,96 @@
#
# Name/Organization <email address>
-Google Inc.
-Sigma Designs Inc.
-ARM Ltd.
-Hewlett-Packard Development Company, LP
-Igalia, S.L.
-Joyent, Inc.
-Bloomberg Finance L.P.
-NVIDIA Corporation
-BlackBerry Limited
-Opera Software ASA
-Intel Corporation
-MIPS Technologies, Inc.
-Imagination Technologies, LLC
-Loongson Technology Corporation Limited
+Google Inc. <*@google.com>
+The Chromium Authors <*@chromium.org>
+Sigma Designs Inc. <*@sdesigns.com>
+ARM Ltd. <*@arm.com>
+Hewlett-Packard Development Company, LP <*@palm.com>
+Igalia, S.L. <*@igalia.com>
+Joyent, Inc. <*@joyent.com>
+Bloomberg Finance L.P. <*@bloomberg.net>
+NVIDIA Corporation <*@nvidia.com>
+BlackBerry Limited <*@blackberry.com>
+Opera Software ASA <*@opera.com>
+Intel Corporation <*@intel.com>
+MIPS Technologies, Inc. <*@mips.com>
+Imagination Technologies, LLC <*@imgtec.com>
+Loongson Technology Corporation Limited <*@loongson.cn>
+Code Aurora Forum <*@codeaurora.org>
+Home Jinni Inc. <*@homejinni.com>
+IBM Inc. <*@*.ibm.com>
+Samsung <*@*.samsung.com>
+Joyent, Inc <*@joyent.com>
+RT-RK Computer Based System <*@rt-rk.com>
+Amazon, Inc <*@amazon.com>
+ST Microelectronics <*@st.com>
+Yandex LLC <*@yandex-team.ru>
+StrongLoop, Inc. <*@strongloop.com>
+Aaron Bieber <deftly@gmail.com>
+Abdulla Kamar <abdulla.kamar@gmail.com>
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
-Alexandre Rames <alexandre.rames@arm.com>
Alexandre Vassalotti <avassalotti@gmail.com>
+Alexis Campailla <alexis@janeasystems.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
-Baptiste Afsa <baptiste.afsa@arm.com>
+Andrew Paprocki <andrew@ishiboo.com>
+Andrei Kashcha <anvaka@gmail.com>
+Ben Noordhuis <info@bnoordhuis.nl>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
-Chunyang Dai <chunyang.dai@intel.com>
+Christopher A. Taylor <chris@gameclosure.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
-Derek J Conrod <dconrod@codeaurora.org>
-Dineel D Sule <dsule@codeaurora.org>
+Douglas Crosher <dtc-v8@scieneer.com>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
+Felix Geisendörfer <haimuiba@gmail.com>
Filipe David Manana <fdmanana@gmail.com>
-Haitao Feng <haitao.feng@intel.com>
+Geoffrey Garside <ggarside@gmail.com>
+Han Choongwoo <cwhan.tunz@gmail.com>
+Hirofumi Mako <mkhrfm@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
-Jacob Bramley <jacob.bramley@arm.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
+Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Joel Stanley <joel.stan@gmail.com>
Johan Bergström <johan@bergstroem.nu>
-John Jozwiak <jjozwiak@codeaurora.org>
Jonathan Liu <net147@gmail.com>
-Kun Zhang <zhangk@codeaurora.org>
+Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Luis Reis <luis.m.reis@gmail.com>
-Martyn Capewell <martyn.capewell@arm.com>
+Luke Zarko <lukezarko@gmail.com>
+Maciej Małecki <me@mmalecki.com>
Mathias Bynens <mathias@qiwi.be>
Matt Hanselman <mjhanselman@gmail.com>
+Matthew Sporleder <msporleder@gmail.com>
Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Lutz <michi@icosahedron.de>
Michael Smith <mike@w3.org>
Mike Gilbert <floppymaster@gmail.com>
+Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
+Paul Lind <plind44@gmail.com>
Rafal Krypa <rafal@krypa.net>
-Rajeev R Krithivasan <rkrithiv@codeaurora.org>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
-Rodolph Perfetta <rodolph.perfetta@arm.com>
-Ryan Dahl <coldredlemur@gmail.com>
+Robert Nagy <robert.nagy@gmail.com>
+Ryan Dahl <ry@tinyclouds.org>
Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
-Subrato K De <subratokde@codeaurora.org>
+Seo Sanghyeon <sanxiyn@gmail.com>
Tobias Burnus <burnus@net-b.de>
-Vincent Belliard <vincent.belliard@arm.com>
+Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
-Weiliang Lin<weiliang.lin@intel.com>
-Xi Qian <xi.qian@intel.com>
-Yuqiang Xian <yuqiang.xian@intel.com>
-Zaheer Ahmad <zahmad@codeaurora.org>
+Vladimir Shutoff <vovan@shutoff.ru>
+Yu Yin <xwafish@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
+柳荣一 <admin@web-tinker.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 6534eea859..713ab6de57 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -18,10 +18,9 @@ v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
v8_use_snapshot = true
-v8_enable_extra_checks = is_debug
v8_target_arch = cpu_arch
v8_random_seed = "314159265"
-
+v8_toolset_for_d8 = "host"
###############################################################################
# Configurations
@@ -63,54 +62,31 @@ config("features") {
defines = []
if (v8_enable_disassembler == true) {
- defines += [
- "ENABLE_DISASSEMBLER",
- ]
+ defines += [ "ENABLE_DISASSEMBLER" ]
}
if (v8_enable_gdbjit == true) {
- defines += [
- "ENABLE_GDB_JIT_INTERFACE",
- ]
+ defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
}
if (v8_object_print == true) {
- defines += [
- "OBJECT_PRINT",
- ]
+ defines += [ "OBJECT_PRINT" ]
}
if (v8_enable_verify_heap == true) {
- defines += [
- "VERIFY_HEAP",
- ]
+ defines += [ "VERIFY_HEAP" ]
}
if (v8_interpreted_regexp == true) {
- defines += [
- "V8_INTERPRETED_REGEXP",
- ]
+ defines += [ "V8_INTERPRETED_REGEXP" ]
}
if (v8_deprecation_warnings == true) {
- defines += [
- "V8_DEPRECATION_WARNINGS",
- ]
+ defines += [ "V8_DEPRECATION_WARNINGS" ]
}
if (v8_enable_i18n_support == true) {
- defines += [
- "V8_I18N_SUPPORT",
- ]
- }
- if (v8_enable_extra_checks == true) {
- defines += [
- "ENABLE_EXTRA_CHECKS",
- ]
+ defines += [ "V8_I18N_SUPPORT" ]
}
if (v8_enable_handle_zapping == true) {
- defines += [
- "ENABLE_HANDLE_ZAPPING",
- ]
+ defines += [ "ENABLE_HANDLE_ZAPPING" ]
}
if (v8_use_external_startup_data == true) {
- defines += [
- "V8_USE_EXTERNAL_STARTUP_DATA",
- ]
+ defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
}
}
@@ -120,27 +96,45 @@ config("toolchain") {
defines = []
cflags = []
- # TODO(jochen): Add support for arm, mips, mipsel.
+ # TODO(jochen): Add support for arm subarchs, mips, mipsel.
+
+ if (v8_target_arch == "arm") {
+ defines += [ "V8_TARGET_ARCH_ARM" ]
+ if (arm_version == 7) {
+ defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
+ }
+ if (arm_fpu == "vfpv3-d16") {
+ defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
+ }
+ if (arm_fpu == "vfpv3") {
+ defines += [
+ "CAN_USE_VFP3_INSTRUCTIONS",
+ "CAN_USE_VFP32DREGS",
+ ]
+ }
+ if (arm_fpu == "neon") {
+ defines += [
+ "CAN_USE_VFP3_INSTRUCTIONS",
+ "CAN_USE_VFP32DREGS",
+ "CAN_USE_NEON",
+ ]
+ }
+
+ # TODO(jochen): Add support for arm_test_noprobe.
+ # TODO(jochen): Add support for cpu_arch != v8_target_arch/
+ }
if (v8_target_arch == "arm64") {
- defines += [
- "V8_TARGET_ARCH_ARM64",
- ]
+ defines += [ "V8_TARGET_ARCH_ARM64" ]
}
if (v8_target_arch == "x86") {
- defines += [
- "V8_TARGET_ARCH_IA32",
- ]
+ defines += [ "V8_TARGET_ARCH_IA32" ]
}
if (v8_target_arch == "x64") {
- defines += [
- "V8_TARGET_ARCH_X64",
- ]
+ defines += [ "V8_TARGET_ARCH_X64" ]
}
if (is_win) {
- defines += [
- "WIN32",
- ]
+ defines += [ "WIN32" ]
# TODO(jochen): Support v8_enable_prof.
}
@@ -170,7 +164,7 @@ action("js2c") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
- source_prereqs = [ "tools/jsmin.py" ]
+ inputs = [ "tools/jsmin.py" ]
sources = [
"src/runtime.js",
@@ -181,7 +175,6 @@ action("js2c") {
"src/uri.js",
"src/third_party/fdlibm/fdlibm.js",
"src/math.js",
- "src/apinatives.js",
"src/date.js",
"src/regexp.js",
"src/arraybuffer.js",
@@ -203,7 +196,7 @@ action("js2c") {
]
outputs = [
- "$target_gen_dir/libraries.cc"
+ "$target_gen_dir/libraries.cc",
]
if (v8_enable_i18n_support) {
@@ -211,15 +204,15 @@ action("js2c") {
}
args = [
- rebase_path("$target_gen_dir/libraries.cc", root_build_dir),
- "CORE",
- ] + rebase_path(sources, root_build_dir)
+ rebase_path("$target_gen_dir/libraries.cc", root_build_dir),
+ "CORE",
+ ] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries.bin" ]
args += [
"--startup_blob",
- rebase_path("$target_gen_dir/libraries.bin", root_build_dir)
+ rebase_path("$target_gen_dir/libraries.bin", root_build_dir),
]
}
}
@@ -231,7 +224,7 @@ action("js2c_experimental") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
- source_prereqs = [ "tools/jsmin.py" ]
+ inputs = [ "tools/jsmin.py" ]
sources = [
"src/macros.py",
@@ -241,46 +234,64 @@ action("js2c_experimental") {
"src/harmony-array.js",
"src/harmony-array-includes.js",
"src/harmony-typedarray.js",
- "src/harmony-classes.js",
"src/harmony-tostring.js",
"src/harmony-templates.js",
- "src/harmony-regexp.js"
+ "src/harmony-regexp.js",
]
outputs = [
- "$target_gen_dir/experimental-libraries.cc"
+ "$target_gen_dir/experimental-libraries.cc",
]
args = [
- rebase_path("$target_gen_dir/experimental-libraries.cc", root_build_dir),
- "EXPERIMENTAL",
- ] + rebase_path(sources, root_build_dir)
+ rebase_path("$target_gen_dir/experimental-libraries.cc",
+ root_build_dir),
+ "EXPERIMENTAL",
+ ] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries_experimental.bin" ]
args += [
"--startup_blob",
- rebase_path("$target_gen_dir/libraries_experimental.bin", root_build_dir)
+ rebase_path("$target_gen_dir/libraries_experimental.bin", root_build_dir),
]
}
}
+action("d8_js2c") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ script = "tools/js2c.py"
+
+ inputs = [
+ "src/d8.js",
+ "src/macros.py",
+ ]
+
+ outputs = [
+ "$target_gen_dir/d8-js.cc",
+ ]
+
+ args = rebase_path(outputs, root_build_dir) + [ "D8" ] +
+ rebase_path(inputs, root_build_dir)
+}
+
if (v8_use_external_startup_data) {
action("natives_blob") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":js2c",
- ":js2c_experimental"
+ ":js2c_experimental",
]
sources = [
"$target_gen_dir/libraries.bin",
- "$target_gen_dir/libraries_experimental.bin"
+ "$target_gen_dir/libraries_experimental.bin",
]
outputs = [
- "$root_out_dir/natives_blob.bin"
+ "$root_out_dir/natives_blob.bin",
]
script = "tools/concatenate-files.py"
@@ -300,23 +311,24 @@ action("postmortem-metadata") {
]
outputs = [
- "$target_gen_dir/debug-support.cc"
+ "$target_gen_dir/debug-support.cc",
]
- args =
- rebase_path(outputs, root_build_dir) +
- rebase_path(sources, root_build_dir)
+ args = rebase_path(outputs, root_build_dir) +
+ rebase_path(sources, root_build_dir)
}
action("run_mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- deps = [ ":mksnapshot($host_toolchain)" ]
+ deps = [
+ ":mksnapshot($host_toolchain)",
+ ]
script = "tools/run.py"
outputs = [
- "$target_gen_dir/snapshot.cc"
+ "$target_gen_dir/snapshot.cc",
]
args = [
@@ -324,24 +336,27 @@ action("run_mksnapshot") {
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--log-snapshot-positions",
- "--logfile", rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
- rebase_path("$target_gen_dir/snapshot.cc", root_build_dir)
+ "--logfile",
+ rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
+ rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
]
if (v8_random_seed != "0") {
- args += [ "--random-seed", v8_random_seed ]
+ args += [
+ "--random-seed",
+ v8_random_seed,
+ ]
}
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
"--startup_blob",
- rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir)
+ rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir),
]
}
}
-
###############################################################################
# Source Sets (aka static libraries)
#
@@ -363,7 +378,11 @@ source_set("v8_nosnapshot") {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config", ":features", ":toolchain" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
}
source_set("v8_snapshot") {
@@ -384,7 +403,11 @@ source_set("v8_snapshot") {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config", ":features", ":toolchain" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
}
if (v8_use_external_startup_data) {
@@ -406,7 +429,11 @@ if (v8_use_external_startup_data) {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config", ":features", ":toolchain" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
}
}
@@ -424,6 +451,8 @@ source_set("v8_base") {
"src/allocation-tracker.h",
"src/api.cc",
"src/api.h",
+ "src/api-natives.cc",
+ "src/api-natives.h",
"src/arguments.cc",
"src/arguments.h",
"src/assembler.cc",
@@ -432,8 +461,6 @@ source_set("v8_base") {
"src/assert-scope.cc",
"src/ast-numbering.cc",
"src/ast-numbering.h",
- "src/ast-this-access-visitor.cc",
- "src/ast-this-access-visitor.h",
"src/ast-value-factory.cc",
"src/ast-value-factory.h",
"src/ast.cc",
@@ -478,6 +505,8 @@ source_set("v8_base") {
"src/compilation-statistics.h",
"src/compiler/access-builder.cc",
"src/compiler/access-builder.h",
+ "src/compiler/all-nodes.cc",
+ "src/compiler/all-nodes.h",
"src/compiler/ast-graph-builder.cc",
"src/compiler/ast-graph-builder.h",
"src/compiler/ast-loop-assignment-analyzer.cc",
@@ -498,6 +527,8 @@ source_set("v8_base") {
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/control-equivalence.h",
+ "src/compiler/control-flow-optimizer.cc",
+ "src/compiler/control-flow-optimizer.h",
"src/compiler/control-reducer.cc",
"src/compiler/control-reducer.h",
"src/compiler/diamond.h",
@@ -505,7 +536,6 @@ source_set("v8_base") {
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/generic-algorithm.h",
- "src/compiler/graph-builder.cc",
"src/compiler/graph-builder.h",
"src/compiler/graph-inl.h",
"src/compiler/graph-reducer.cc",
@@ -532,8 +562,8 @@ source_set("v8_base") {
"src/compiler/js-graph.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
- "src/compiler/js-intrinsic-builder.cc",
- "src/compiler/js-intrinsic-builder.h",
+ "src/compiler/js-intrinsic-lowering.cc",
+ "src/compiler/js-intrinsic-lowering.h",
"src/compiler/js-operator.cc",
"src/compiler/js-operator.h",
"src/compiler/js-typed-lowering.cc",
@@ -545,6 +575,7 @@ source_set("v8_base") {
"src/compiler/linkage.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
+ "src/compiler/loop-peeling.cc",
"src/compiler/loop-analysis.cc",
"src/compiler/loop-analysis.h",
"src/compiler/machine-operator-reducer.cc",
@@ -555,12 +586,13 @@ source_set("v8_base") {
"src/compiler/machine-type.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
- "src/compiler/node-aux-data-inl.h",
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
"src/compiler/node-cache.h",
+ "src/compiler/node-marker.cc",
+ "src/compiler/node-marker.h",
"src/compiler/node-matchers.h",
- "src/compiler/node-properties-inl.h",
+ "src/compiler/node-properties.cc",
"src/compiler/node-properties.h",
"src/compiler/node.cc",
"src/compiler/node.h",
@@ -570,6 +602,8 @@ source_set("v8_base") {
"src/compiler/operator-properties.h",
"src/compiler/operator.cc",
"src/compiler/operator.h",
+ "src/compiler/osr.cc",
+ "src/compiler/osr.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
@@ -775,8 +809,6 @@ source_set("v8_base") {
"src/ic/ic-compiler.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
- "src/interface.cc",
- "src/interface.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter-irregexp.cc",
@@ -813,6 +845,8 @@ source_set("v8_base") {
"src/macro-assembler.h",
"src/messages.cc",
"src/messages.h",
+ "src/modules.cc",
+ "src/modules.h",
"src/msan.h",
"src/natives.h",
"src/objects-debug.cc",
@@ -855,7 +889,6 @@ source_set("v8_base") {
"src/rewriter.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
- "src/runtime/runtime-api.cc",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-classes.cc",
"src/runtime/runtime-collections.cc",
@@ -949,7 +982,6 @@ source_set("v8_base") {
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
- "src/zone-inl.h",
"src/zone.cc",
"src/zone.h",
"src/third_party/fdlibm/fdlibm.cc",
@@ -1209,7 +1241,11 @@ source_set("v8_base") {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config", ":features", ":toolchain" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
if (!is_debug) {
configs -= [ "//build/config/compiler:optimize" ]
@@ -1217,7 +1253,9 @@ source_set("v8_base") {
}
defines = []
- deps = [ ":v8_libbase" ]
+ deps = [
+ ":v8_libbase",
+ ]
if (is_win) {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
@@ -1229,6 +1267,7 @@ source_set("v8_base") {
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
+
# TODO(jochen): Add support for icu_use_data_file_flag
defines += [ "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE" ]
} else {
@@ -1297,7 +1336,11 @@ source_set("v8_libbase") {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config_base", ":features", ":toolchain" ]
+ configs += [
+ ":internal_config_base",
+ ":features",
+ ":toolchain",
+ ]
if (!is_debug) {
configs -= [ "//build/config/compiler:optimize" ]
@@ -1307,15 +1350,11 @@ source_set("v8_libbase") {
defines = []
if (is_posix) {
- sources += [
- "src/base/platform/platform-posix.cc"
- ]
+ sources += [ "src/base/platform/platform-posix.cc" ]
}
if (is_linux) {
- sources += [
- "src/base/platform/platform-linux.cc"
- ]
+ sources += [ "src/base/platform/platform-linux.cc" ]
libs = [ "rt" ]
} else if (is_android) {
@@ -1344,7 +1383,10 @@ source_set("v8_libbase") {
defines += [ "_CRT_RAND_S" ] # for rand_s()
- libs = [ "winmm.lib", "ws2_32.lib" ]
+ libs = [
+ "winmm.lib",
+ "ws2_32.lib",
+ ]
}
# TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
@@ -1363,7 +1405,11 @@ source_set("v8_libplatform") {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config_base", ":features", ":toolchain" ]
+ configs += [
+ ":internal_config_base",
+ ":features",
+ ":toolchain",
+ ]
if (!is_debug) {
configs -= [ "//build/config/compiler:optimize" ]
@@ -1389,7 +1435,11 @@ if (current_toolchain == host_toolchain) {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config", ":features", ":toolchain" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
deps = [
":v8_base",
@@ -1405,64 +1455,109 @@ if (current_toolchain == host_toolchain) {
#
if (component_mode == "shared_library") {
+ component("v8") {
+ sources = [
+ "src/v8dll-main.cc",
+ ]
-component("v8") {
- sources = [
- "src/v8dll-main.cc",
- ]
+ if (v8_use_snapshot && v8_use_external_startup_data) {
+ deps = [
+ ":v8_base",
+ ":v8_external_snapshot",
+ ]
+ } else if (v8_use_snapshot) {
+ deps = [
+ ":v8_base",
+ ":v8_snapshot",
+ ]
+ } else {
+ assert(!v8_use_external_startup_data)
+ deps = [
+ ":v8_base",
+ ":v8_nosnapshot",
+ ]
+ }
- if (v8_use_snapshot && v8_use_external_startup_data) {
- deps = [
- ":v8_base",
- ":v8_external_snapshot",
- ]
- } else if (v8_use_snapshot) {
- deps = [
- ":v8_base",
- ":v8_snapshot",
- ]
- } else {
- assert(!v8_use_external_startup_data)
- deps = [
- ":v8_base",
- ":v8_nosnapshot",
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
]
- }
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [ ":internal_config", ":features", ":toolchain" ]
+ direct_dependent_configs = [ ":external_config" ]
- direct_dependent_configs = [ ":external_config" ]
+ libs = []
+ if (is_android && current_toolchain != host_toolchain) {
+ libs += [ "log" ]
+ }
+ }
+} else {
+ group("v8") {
+ if (v8_use_snapshot && v8_use_external_startup_data) {
+ deps = [
+ ":v8_base",
+ ":v8_external_snapshot",
+ ]
+ } else if (v8_use_snapshot) {
+ deps = [
+ ":v8_base",
+ ":v8_snapshot",
+ ]
+ } else {
+ assert(!v8_use_external_startup_data)
+ deps = [
+ ":v8_base",
+ ":v8_nosnapshot",
+ ]
+ }
- libs = []
- if (is_android && current_toolchain != host_toolchain) {
- libs += [ "log" ]
+ direct_dependent_configs = [ ":external_config" ]
}
}
-} else {
-
-group("v8") {
- if (v8_use_snapshot && v8_use_external_startup_data) {
- deps = [
- ":v8_base",
- ":v8_external_snapshot",
+if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
+ (current_toolchain != host_toolchain && v8_toolset_for_d8 == "target")) {
+ executable("d8") {
+ sources = [
+ "src/d8.cc",
+ "src/d8.h",
+ "src/startup-data-util.h",
+ "src/startup-data-util.cc",
]
- } else if (v8_use_snapshot) {
- deps = [
- ":v8_base",
- ":v8_snapshot",
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
]
- } else {
- assert(!v8_use_external_startup_data)
+
deps = [
- ":v8_base",
- ":v8_nosnapshot",
+ ":d8_js2c",
+ ":v8",
+ ":v8_libplatform",
+ "//build/config/sanitizers:deps",
]
- }
- direct_dependent_configs = [ ":external_config" ]
-}
+ # TODO(jochen): Add support for readline and vtunejit.
+
+ if (is_posix) {
+ sources += [ "src/d8-posix.cc" ]
+ } else if (is_win) {
+ sources += [ "src/d8-windows.cc" ]
+ }
+ if (component_mode != "shared_library") {
+ sources += [
+ "src/d8-debug.cc",
+ "$target_gen_dir/d8-js.cc",
+ ]
+ }
+ if (v8_enable_i18n_support) {
+ deps += [ "//third_party/icu" ]
+ }
+ }
}
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index d42a2f1564..0f835dc8c4 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,533 @@
+2015-02-19: Version 4.2.77
+
+ Make generator constructors configurable (issue 3902).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-19: Version 4.2.76
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-18: Version 4.2.75
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-18: Version 4.2.74
+
+ Correctly propagate terminate exception in TryCall (issue 3892).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-17: Version 4.2.73
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-17: Version 4.2.72
+
+ [turbofan] Fix control reducer with re-reducing branches (Chromium issue
+ 458876).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-16: Version 4.2.71
+
+ Implement ES6 rest parameters (issue 2159).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-13: Version 4.2.70
+
+ new classes: no longer experimental (issue 3834).
+
+ Make it possible to define arguments for CompileFunctionInContext.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-12: Version 4.2.69
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-11: Version 4.2.68
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-11: Version 4.2.67
+
+ Throw on range error when creating a string via API (issue 3853).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-11: Version 4.2.66
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-10: Version 4.2.65
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-10: Version 4.2.64
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-10: Version 4.2.63
+
+ Introduce a compile method that takes context extensions (Chromium issue
+ 456192).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-09: Version 4.2.62
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-09: Version 4.2.61
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-07: Version 4.2.60
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-07: Version 4.2.59
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-07: Version 4.2.58
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-06: Version 4.2.57
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-06: Version 4.2.56
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-06: Version 4.2.55
+
+ Protect against uninitialized lexical variables at top-level (Chromium
+ issue 452510).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-05: Version 4.2.54
+
+ Fix HConstant(double, ...) constructor (issue 3865).
+
+ Add NativeWeakMap to v8.h (Chromium issue 437416).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-05: Version 4.2.53
+
+ Fix issue with multiple properties and emit store (issue 3856).
+
+ Class methods should be non enumerable (issue 3330).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-04: Version 4.2.52
+
+ Add WeakKeyMap to v8.h (Chromium issue 437416).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-04: Version 4.2.51
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-03: Version 4.2.50
+
+ Reset inlining limits due to overly long compilation times in
+ Speedometer, Dart2JS (Chromium issue 454625).
+
+ Add WeakMap to v8.h (Chromium issue 437416).
+
+ [V8] Added line, column and script symbols for SyntaxError (Chromium
+ issue 443140).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-03: Version 4.2.49
+
+ Compute the same hash for all NaN values (issue 3859).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-03: Version 4.2.48
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-02: Version 4.2.47
+
+ Check global object behind global proxy for extensibility (Chromium
+ issue 454091).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-02: Version 4.2.46
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-02: Version 4.2.45
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-01: Version 4.2.44
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-01: Version 4.2.43
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-31: Version 4.2.42
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-31: Version 4.2.41
+
+ Layout descriptor sharing issue fixed (issue 3832, Chromium issue
+ 437713).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-30: Version 4.2.40
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-30: Version 4.2.38
+
+ Move object literal checking into checker classes (issue 3819).
+
+ [turbofan] Fix OSR compilations of for-in.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-30: Version 4.2.37
+
+ Do not create unresolved variables when parsing arrow functions lazily
+ (issue 3501).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-29: Version 4.2.36
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-29: Version 4.2.35
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-28: Version 4.2.34
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-28: Version 4.2.33
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-27: Version 4.2.32
+
+ Do not generalize field representations when making elements kind or
+ observed transition (Chromium issue 448711).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-27: Version 4.2.31
+
+ [x86] Disable AVX unless the operating system explicitly claims to
+ support it (issue 3846, Chromium issue 452033).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-27: Version 4.2.30
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-26: Version 4.2.29
+
+ MIPS: Fixed Hydrogen environment handling for mul-i ARM and ARM64 (issue
+ 451322).
+
+ [turbofan] Simplify reduction if IfTrue and IfFalse and fix bugs
+ (Chromium issue 451958).
+
+ Add HeapNumber fast path to v8::Value::{Uint,Int}32Value() (Chromium
+ issue 446097).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-26: Version 4.2.28
+
+ Fixed Hydrogen environment handling for mul-i on ARM and ARM64 (issue
+ 451322).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-25: Version 4.2.27
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-24: Version 4.2.26
+
+ ES6 Array.prototype.toString falls back on Object.prototype.toString if
+ method "join" is not callable (issue 3793).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-23: Version 4.2.25
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-23: Version 4.2.24
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-23: Version 4.2.23
+
+ [x86] Blacklist AVX for Windows versions before 6.1 (Windows 7) (issue
+ 3846).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-23: Version 4.2.22
+
+ Fix run-time ARMv6 detection (issue 3844).
+
+ Support concatenating with zero-size arrays with DICTIONARY_ELEMENTS in
+ Runtime_ArrayConcat (Chromium issue 450895).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-22: Version 4.2.21
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-22: Version 4.2.20
+
+ Add a pretty printer to improve the error message non-function calls
+ (Chromium issue 259443).
+
+ Remove implicit uint8_t to char cast in string replace (Chromium issue
+ 446196).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-21: Version 4.2.19
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-20: Version 4.2.18
+
+ Fix issue with __proto__ when using ES6 object literals (issue 3818).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-20: Version 4.2.17
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-20: Version 4.2.16
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-19: Version 4.2.15
+
+ Unobscurified OFStream (Chromium issue 448102).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-19: Version 4.2.14
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-18: Version 4.2.13
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-18: Version 4.2.12
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-17: Version 4.2.11
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-16: Version 4.2.10
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-16: Version 4.2.9
+
+ MIPS: ES6 computed property names (issue 3754).
+
+ ES6 computed property names (issue 3754).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-15: Version 4.2.8
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-15: Version 4.2.7
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-15: Version 4.2.6
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-15: Version 4.2.5
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-14: Version 4.2.4
+
+ Auto-generate v8 version based on tags (Chromium issue 446166).
+
+ Remove support for signatures with arguments.
+
+ Add proper support for proxies to HType (Chromium issue 448730).
+
+ [turbofan] Fix truncation/representation sloppiness wrt. bool/bit (issue
+ 3812).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-14: Version 4.2.3
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-14: Version 4.2.2
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-14: Version 4.2.1
+
+ Map -0 to integer 0 for typed array constructors (Chromium issue
+ 447756).
+
+ Introduce a gyp variable to control whether or not slow dchecks are on.
+
+ Correctly setup the freelist of the coderange on Win64 (Chromium issue
+ 447555).
+
+ Fast forward V8 to version 4.2.
+
+ Remove "extra checks".
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-08: Version 3.32.7
+
+ Correctly parse line ends for debugging (issue 2825).
+
+ Fixed printing during DCE (issue 3679).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-08: Version 3.32.6
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-08: Version 3.32.5
+
+ Correct handling of exceptions occured during getting of exception stack
+ trace (Chromium issue 444805).
+
+ Fix bug in Runtime_CompileOptimized resulting from stack overflow
+ (Chromium issue 446774).
+
+ Turn on job-based recompilation (issue 3608).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-01-07: Version 3.32.4
+
+ Performance and stability improvements on all platforms.
+
+
2015-01-07: Version 3.32.3
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index a81c7ecc38..b829d05dab 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,17 +8,17 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "fe00999dfaee449d3465a9316778434884da4fa7", # from svn revision 2010
+ Var("git_url") + "/external/gyp.git" + "@" + "34640080d08ab2a37665512e52142947def3056d",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "51c1a4ce5f362676aa1f1cfdb5b7e52edabfa5aa",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "4e3266f32c62d30a3f9e2232a753c60129d1e670",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "23a4e2f545c7b6340d7e5a2b74801941b0a86535",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "5c5e924788fe40f7d6e0a3841ac572de2475e689",
"v8/testing/gtest":
- Var("git_url") + "/external/googletest.git" + "@" + "8245545b6dc9c4703e6496d1efd19e975ad2b038", # from svn revision 700
+ Var("git_url") + "/external/googletest.git" + "@" + "be1868139ffe0ccd0e8e3b37292b84c821d9c8ad",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "c945be21f6485fa177b43814f910b76cce921653",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "f6daa55d03995e82201a3278203e7c0421a59546",
}
deps_os = {
@@ -80,6 +80,17 @@ hooks = [
"-s", "v8/buildtools/linux64/clang-format.sha1",
],
},
+ # Pull binutils for linux, enabled debug fission for faster linking /
+ # debugging when used with clang on Ubuntu Precise.
+ # https://code.google.com/p/chromium/issues/detail?id=352046
+ {
+ 'name': 'binutils',
+ 'pattern': 'v8/third_party/binutils',
+ 'action': [
+ 'python',
+ 'v8/third_party/binutils/download.py',
+ ],
+ },
{
# Pull clang if needed or requested via GYP_DEFINES.
# Note: On Win, this should run after win_toolchain, as it may use it.
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 606b5d7bf1..5468d91334 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -27,8 +27,6 @@
# Variable default definitions. Override them by exporting them in your shell.
-CXX ?= g++
-LINK ?= g++
OUTDIR ?= out
TESTJOBS ?=
GYPFLAGS ?=
@@ -87,10 +85,17 @@ ifeq ($(snapshot), external)
endif
# extrachecks=on/off
ifeq ($(extrachecks), on)
- GYPFLAGS += -Dv8_enable_extra_checks=1 -Dv8_enable_handle_zapping=1
+ GYPFLAGS += -Ddcheck_always_on=1 -Dv8_enable_handle_zapping=1
endif
ifeq ($(extrachecks), off)
- GYPFLAGS += -Dv8_enable_extra_checks=0 -Dv8_enable_handle_zapping=0
+ GYPFLAGS += -Ddcheck_always_on=0 -Dv8_enable_handle_zapping=0
+endif
+# slowdchecks=on/off
+ifeq ($(slowdchecks), on)
+ GYPFLAGS += -Dv8_enable_slow_dchecks=1
+endif
+ifeq ($(slowdchecks), off)
+ GYPFLAGS += -Dv8_enable_slow_dchecks=0
endif
# gdbjit=on/off
ifeq ($(gdbjit), on)
@@ -103,10 +108,6 @@ endif
ifeq ($(vtunejit), on)
GYPFLAGS += -Dv8_enable_vtunejit=1
endif
-# optdebug=on
-ifeq ($(optdebug), on)
- GYPFLAGS += -Dv8_optimized_debug=2
-endif
# unalignedaccess=on
ifeq ($(unalignedaccess), on)
GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
@@ -144,19 +145,17 @@ endif
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
-# asan=/path/to/clang++
-ifneq ($(strip $(asan)),)
- GYPFLAGS += -Dasan=1
- export CC=$(dir $(asan))clang
- export CXX=$(asan)
- export CXX_host=$(asan)
- export LINK=$(asan)
- export ASAN_SYMBOLIZER_PATH=$(dir $(asan))llvm-symbolizer
+# asan=on
+ifeq ($(asan), on)
+ GYPFLAGS += -Dasan=1 -Dclang=1
TESTFLAGS += --asan
ifeq ($(lsan), on)
GYPFLAGS += -Dlsan=1
endif
endif
+ifdef embedscript
+ GYPFLAGS += -Dembed_script=$(embedscript)
+endif
# arm specific flags.
# arm_version=<number | "default">
@@ -214,8 +213,6 @@ ifeq ($(arm_test_noprobe), on)
endif
# ----------------- available targets: --------------------
-# - "builddeps": pulls in external dependencies for building
-# - "dependencies": pulls in all external dependencies
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
@@ -233,7 +230,7 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
@@ -241,9 +238,15 @@ ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
-GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
- build/toolchain.gypi samples/samples.gyp src/d8.gyp \
- test/cctest/cctest.gyp test/unittests/unittests.gyp tools/gyp/v8.gyp
+GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
+ build/shim_headers.gypi build/features.gypi build/standalone.gypi \
+ build/toolchain.gypi build/all.gyp build/mac/asan.gyp \
+ build/android.gypi test/cctest/cctest.gyp \
+ test/unittests/unittests.gyp tools/gyp/v8.gyp \
+ tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
+ buildtools/third_party/libc++abi/libc++abi.gyp \
+ buildtools/third_party/libc++/libc++.gyp samples/samples.gyp \
+ src/third_party/vtune/v8vtune.gyp src/d8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
@@ -291,7 +294,6 @@ $(ARCHES): $(addprefix $$@.,$(DEFAULT_MODES))
# Defines how to build a particular target (e.g. ia32.release).
$(BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
- CXX="$(CXX)" LINK="$(LINK)" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print \
raw_input().replace('opt', '').capitalize()") \
@@ -299,7 +301,7 @@ $(BUILDS): $(OUTDIR)/Makefile.$$@
native: $(OUTDIR)/Makefile.native
@$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
- CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
+ BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
$(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))
@@ -423,6 +425,7 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
$(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
cut -f 2 -d " " | cut -f 1 -d "-" ))
$(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
+ $(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
$(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@))))
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
@@ -431,7 +434,7 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
-Dv8_target_arch=$(V8_TARGET_ARCH) \
$(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \
-Dtarget_arch=$(V8_TARGET_ARCH),) \
- $(if $(findstring optdebug,$@),-Dv8_optimized_debug=2,) \
+ $(if $(findstring optdebug,$@),-Dv8_optimized_debug=1,) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
@@ -468,8 +471,11 @@ $(ENVFILE): $(ENVFILE).new
# Stores current GYPFLAGS in a file.
$(ENVFILE).new:
- @mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new; \
- echo "CXX=$(CXX)" >> $(ENVFILE).new
+ $(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
+ cut -f 2 -d " " | cut -f 1 -d "-" ))
+ $(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
+ $(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
+ @mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS) -Dtarget_arch=$(CXX_TARGET_ARCH)" > $(ENVFILE).new;
# Heap constants for grokdump.
DUMP_FILE = tools/v8heapconst.py
@@ -489,26 +495,5 @@ GPATH GRTAGS GSYMS GTAGS: gtags.files $(shell cat gtags.files 2> /dev/null)
gtags.clean:
rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
-# Dependencies. "builddeps" are dependencies required solely for building,
-# "dependencies" includes also dependencies required for development.
-# Remember to keep these in sync with the DEPS file.
-builddeps:
- svn checkout --force https://gyp.googlecode.com/svn/trunk build/gyp \
- --revision 1831
- if svn info third_party/icu 2>&1 | grep -q icu46 ; then \
- svn switch --force \
- https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
- third_party/icu --revision 277999 ; \
- else \
- svn checkout --force \
- https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
- third_party/icu --revision 277999 ; \
- fi
- svn checkout --force https://googletest.googlecode.com/svn/trunk \
- testing/gtest --revision 692
- svn checkout --force https://googlemock.googlecode.com/svn/trunk \
- testing/gmock --revision 485
-
-dependencies: builddeps
- # The spec is a copy of the hooks in v8's DEPS file.
- gclient sync -r fb782d4369d5ae04f17a2fceef7de5a63e50f07b --spec="solutions = [{u'managed': False, u'name': u'buildtools', u'url': u'https://chromium.googlesource.com/chromium/buildtools.git', u'custom_deps': {}, u'custom_hooks': [{u'name': u'clang_format_win',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=win32',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/win/clang-format.exe.sha1']},{u'name': u'clang_format_mac',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=darwin',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/mac/clang-format.sha1']},{u'name': u'clang_format_linux',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=linux*',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/linux64/clang-format.sha1']}],u'deps_file': u'.DEPS.git', u'safesync_url': u''}]"
+dependencies builddeps:
+ $(error Use 'gclient sync' instead)
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 22a05cb177..d6db77ffe0 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -1,4 +1,5 @@
adamk@chromium.org
+arv@chromium.org
bmeurer@chromium.org
danno@chromium.org
dcarney@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 040972e8da..fd0601f17b 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -69,6 +69,7 @@ def _V8PresubmitChecks(input_api, output_api):
from presubmit import SourceProcessor
from presubmit import CheckRuntimeVsNativesNameClashes
from presubmit import CheckExternalReferenceRegistration
+ from presubmit import CheckAuthorizedAuthor
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
@@ -83,6 +84,7 @@ def _V8PresubmitChecks(input_api, output_api):
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"External references registration check failed"))
+ results.extend(CheckAuthorizedAuthor(input_api, output_api))
return results
@@ -242,15 +244,17 @@ def GetPreferredTryMasters(project, change):
return {
'tryserver.v8': {
'v8_linux_rel': set(['defaulttests']),
- 'v8_linux_dbg': set(['defaulttests']),
- 'v8_linux_nosnap_rel': set(['defaulttests']),
+ 'v8_linux_nodcheck_rel': set(['defaulttests']),
+ 'v8_linux_gcc_compile_rel': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),
- 'v8_linux_arm_dbg': set(['defaulttests']),
+ 'v8_linux64_asan_rel': set(['defaulttests']),
+ 'v8_win_rel': set(['defaulttests']),
+ 'v8_win_compile_dbg': set(['defaulttests']),
+ 'v8_win64_rel': set(['defaulttests']),
+ 'v8_mac_rel': set(['defaulttests']),
+ 'v8_linux_arm_rel': set(['defaulttests']),
'v8_linux_arm64_rel': set(['defaulttests']),
- 'v8_linux_layout_dbg': set(['defaulttests']),
+ 'v8_android_arm_compile_rel': set(['defaulttests']),
'v8_linux_chromium_gn_rel': set(['defaulttests']),
- 'v8_mac_rel': set(['defaulttests']),
- 'v8_win_rel': set(['defaulttests']),
- 'v8_win64_compile_rel': set(['defaulttests']),
},
}
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index 465eba9148..2eadca3384 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -102,13 +102,9 @@
'DebugBaseCommon': {
'abstract': 1,
'variables': {
- 'v8_enable_extra_checks%': 1,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
- ['v8_enable_extra_checks==1', {
- 'defines': ['ENABLE_EXTRA_CHECKS',],
- }],
['v8_enable_handle_zapping==1', {
'defines': ['ENABLE_HANDLE_ZAPPING',],
}],
@@ -116,13 +112,9 @@
}, # Debug
'Release': {
'variables': {
- 'v8_enable_extra_checks%': 0,
'v8_enable_handle_zapping%': 0,
},
'conditions': [
- ['v8_enable_extra_checks==1', {
- 'defines': ['ENABLE_EXTRA_CHECKS',],
- }],
['v8_enable_handle_zapping==1', {
'defines': ['ENABLE_HANDLE_ZAPPING',],
}],
diff --git a/deps/v8/build/mac/asan.gyp b/deps/v8/build/mac/asan.gyp
new file mode 100644
index 0000000000..3fc7f58d43
--- /dev/null
+++ b/deps/v8/build/mac/asan.gyp
@@ -0,0 +1,31 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'asan_dynamic_runtime',
+ 'toolsets': ['target', 'host'],
+ 'type': 'none',
+ 'variables': {
+ # Every target is going to depend on asan_dynamic_runtime, so allow
+ # this one to depend on itself.
+ 'prune_self_dependency': 1,
+ # Path is relative to this GYP file.
+ 'asan_rtl_mask_path':
+ '../../third_party/llvm-build/Release+Asserts/lib/clang/*/lib/darwin',
+ 'asan_osx_dynamic':
+ '<(asan_rtl_mask_path)/libclang_rt.asan_osx_dynamic.dylib',
+ },
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)',
+ 'files': [
+ '<!(/bin/ls <(asan_osx_dynamic))',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index ee91e78899..56cebbe1f3 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -33,10 +33,8 @@
'includes': ['toolchain.gypi'],
'variables': {
'component%': 'static_library',
- 'make_clang_dir%': '../third_party/llvm-build/Release+Asserts',
+ 'clang_dir%': 'third_party/llvm-build/Release+Asserts',
'clang_xcode%': 0,
- 'asan%': 0,
- 'tsan%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
@@ -48,7 +46,7 @@
'variables': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
- OS=="netbsd" or OS=="mac" or OS=="qnx"', {
+ OS=="netbsd" or OS=="mac" or OS=="qnx" or OS=="aix"', {
# This handles the Unix platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
@@ -56,7 +54,7 @@
'host_arch%': '<!pymod_do_main(detect_v8_host_arch)',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
- # OS!="netbsd" and OS!="mac"
+ # OS!="netbsd" and OS!="mac" and OS!="aix"
'host_arch%': 'ia32',
}],
],
@@ -67,11 +65,27 @@
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
+
+ # goma settings.
+ # 1 to use goma.
+ # If no gomadir is set, it uses the default gomadir.
+ 'use_goma%': 0,
+ 'gomadir%': '',
+ 'conditions': [
+ # Set default gomadir.
+ ['OS=="win"', {
+ 'gomadir': 'c:\\goma\\goma-win',
+ }, {
+ 'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
+ }],
+ ],
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)',
'werror%': '-Werror',
+ 'use_goma%': '<(use_goma)',
+ 'gomadir%': '<(gomadir)',
# .gyp files or targets should set v8_code to 1 if they build V8 specific
# code, as opposed to external code. This variable is used to control such
@@ -82,15 +96,13 @@
# Speeds up Debug builds:
# 0 - Compiler optimizations off (debuggable) (default). This may
# be 5x slower than Release (or worse).
- # 1 - Turn on compiler optimizations. This may be hard or impossible to
- # debug. This may still be 2x slower than Release (or worse).
- # 2 - Turn on optimizations, and also #undef DEBUG / #define NDEBUG
- # (but leave V8_ENABLE_CHECKS and most other assertions enabled.
- # This may cause some v8 tests to fail in the Debug configuration.
- # This roughly matches the performance of a Release build and can
- # be used by embedders that need to build their own code as debug
- # but don't want or need a debug version of V8. This should produce
- # near-release speeds.
+ # 1 - Turn on optimizations and disable slow DCHECKs, but leave
+ # V8_ENABLE_CHECKS and most other assertions enabled. This may cause
+ # some v8 tests to fail in the Debug configuration. This roughly
+ # matches the performance of a Release build and can be used by
+ # embedders that need to build their own code as debug but don't want
+ # or need a debug version of V8. This should produce near-release
+ # speeds.
'v8_optimized_debug%': 0,
# Use external files for startup data blobs:
@@ -118,13 +130,23 @@
}, {
'os_posix%': 1,
}],
+ ['OS=="win" and use_goma==1', {
+ # goma doesn't support pch yet.
+ 'chromium_win_pch': 0,
+ # goma doesn't support PDB yet, so win_z7=1 or fastbuild=1.
+ 'conditions': [
+ ['win_z7==0 and fastbuild==0', {
+ 'fastbuild': 1,
+ }],
+ ],
+ }],
['(v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
(OS=="linux" or OS=="mac")', {
'v8_enable_gdbjit%': 1,
}, {
'v8_enable_gdbjit%': 0,
}],
- ['OS=="mac"', {
+ ['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64")', {
'clang%': 1,
}, {
'clang%': 0,
@@ -161,10 +183,13 @@
'_GLIBCXX_DEBUG'
],
}],
+ [ 'OS=="aix"', {
+ 'cflags': [ '-gxcoff' ],
+ }],
],
},
'Optdebug': {
- 'inherit_from': [ 'DebugBaseCommon', 'DebugBase2' ],
+ 'inherit_from': [ 'DebugBaseCommon', 'DebugBase1' ],
},
'Debug': {
# Xcode insists on this empty entry.
@@ -201,7 +226,7 @@
],
},
'conditions': [
- ['asan==1', {
+ ['asan==1 and OS!="mac"', {
'target_defaults': {
'cflags_cc+': [
'-fno-omit-frame-pointer',
@@ -209,7 +234,7 @@
'-fsanitize=address',
'-w', # http://crbug.com/162783
],
- 'cflags_cc!': [
+ 'cflags!': [
'-fomit-frame-pointer',
],
'ldflags': [
@@ -217,7 +242,7 @@
],
},
}],
- ['tsan==1', {
+ ['tsan==1 and OS!="mac"', {
'target_defaults': {
'cflags+': [
'-fno-omit-frame-pointer',
@@ -238,8 +263,31 @@
],
},
}],
+ ['asan==1 and OS=="mac"', {
+ 'target_defaults': {
+ 'xcode_settings': {
+ 'OTHER_CFLAGS+': [
+ '-fno-omit-frame-pointer',
+ '-gline-tables-only',
+ '-fsanitize=address',
+ '-w', # http://crbug.com/162783
+ ],
+ 'OTHER_CFLAGS!': [
+ '-fomit-frame-pointer',
+ ],
+ },
+ 'target_conditions': [
+ ['_type!="static_library"', {
+ 'xcode_settings': {'OTHER_LDFLAGS': ['-fsanitize=address']},
+ }],
+ ],
+ 'dependencies': [
+ '<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
+ ],
+ },
+ }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
- or OS=="netbsd"', {
+ or OS=="netbsd" or OS=="aix"', {
'target_defaults': {
'cflags': [
'-Wall',
@@ -256,6 +304,9 @@
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ],
'conditions': [
+ [ 'host_arch=="ppc64"', {
+ 'cflags': [ '-mminimal-toc' ],
+ }],
[ 'visibility=="hidden" and v8_enable_backtrace==0', {
'cflags': [ '-fvisibility=hidden' ],
}],
@@ -442,10 +493,19 @@
}, # target_defaults
}], # OS=="mac"
['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
- 'and OS!="win"', {
+ 'and OS!="win" and "<(GENERATOR)"=="make"', {
'make_global_settings': [
- ['CC', '<(make_clang_dir)/bin/clang'],
- ['CXX', '<(make_clang_dir)/bin/clang++'],
+ ['CC', '../<(clang_dir)/bin/clang'],
+ ['CXX', '../<(clang_dir)/bin/clang++'],
+ ['CC.host', '$(CC)'],
+ ['CXX.host', '$(CXX)'],
+ ],
+ }],
+ ['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
+ 'and OS!="win" and "<(GENERATOR)"=="ninja"', {
+ 'make_global_settings': [
+ ['CC', '<(clang_dir)/bin/clang'],
+ ['CXX', '<(clang_dir)/bin/clang++'],
['CC.host', '$(CC)'],
['CXX.host', '$(CXX)'],
],
@@ -453,7 +513,18 @@
['clang==1 and OS=="win"', {
'make_global_settings': [
# On Windows, gyp's ninja generator only looks at CC.
- ['CC', '<(make_clang_dir)/bin/clang-cl'],
+ ['CC', '../<(clang_dir)/bin/clang-cl'],
+ ],
+ }],
+ # TODO(yyanagisawa): supports GENERATOR==make
+ # make generator doesn't support CC_wrapper without CC
+ # in make_global_settings yet.
+ ['use_goma==1 and ("<(GENERATOR)"=="ninja" or clang==1)', {
+ 'make_global_settings': [
+ ['CC_wrapper', '<(gomadir)/gomacc'],
+ ['CXX_wrapper', '<(gomadir)/gomacc'],
+ ['CC.host_wrapper', '<(gomadir)/gomacc'],
+ ['CXX.host_wrapper', '<(gomadir)/gomacc'],
],
}],
],
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index ac10065601..d4a9403cbd 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -31,7 +31,14 @@
'variables': {
'msvs_use_common_release': 0,
'clang%': 0,
+ 'asan%': 0,
+ 'lsan%': 0,
+ 'msan%': 0,
+ 'tsan%': 0,
+ 'ubsan%': 0,
+ 'ubsan_vptr%': 0,
'v8_target_arch%': '<(target_arch)',
+ 'v8_host_byteorder%': '<!(python -c "import sys; print sys.byteorder")',
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
# for NaCl JIT support. The nacl_target_arch variable provides
@@ -79,6 +86,46 @@
# Allow to suppress the array bounds warning (default is no suppression).
'wno_array_bounds%': '',
+ # Override where to find binutils
+ 'binutils_dir%': '',
+
+ 'conditions': [
+ ['OS=="linux" and host_arch=="x64"', {
+ 'binutils_dir%': 'third_party/binutils/Linux_x64/Release/bin',
+ }],
+ ['OS=="linux" and host_arch=="ia32"', {
+ 'binutils_dir%': 'third_party/binutils/Linux_ia32/Release/bin',
+ }],
+
+ # linux_use_bundled_gold: whether to use the gold linker binary checked
+ # into third_party/binutils. Force this off via GYP_DEFINES when you
+ # are using a custom toolchain and need to control -B in ldflags.
+ # Do not use 32-bit gold on 32-bit hosts as it runs out address space
+ # for component=static_library builds.
+ ['OS=="linux" and (target_arch=="x64" or target_arch=="arm")', {
+ 'linux_use_bundled_gold%': 1,
+ }, {
+ 'linux_use_bundled_gold%': 0,
+ }],
+ # linux_use_bundled_binutils: whether to use the binary binutils
+ # checked into third_party/binutils. These are not multi-arch so cannot
+ # be used except on x86 and x86-64 (the only two architectures which
+ # are currently checke in). Force this off via GYP_DEFINES when you
+ # are using a custom toolchain and need to control -B in cflags.
+ ['OS=="linux" and (target_arch=="ia32" or target_arch=="x64")', {
+ 'linux_use_bundled_binutils%': 1,
+ }, {
+ 'linux_use_bundled_binutils%': 0,
+ }],
+ # linux_use_gold_flags: whether to use build flags that rely on gold.
+ # On by default for x64 Linux.
+ ['OS=="linux" and target_arch=="x64"', {
+ 'linux_use_gold_flags%': 1,
+ }, {
+ 'linux_use_gold_flags%': 0,
+ }],
+ ],
+
# Link-Time Optimizations
'use_lto%': 0,
@@ -91,7 +138,9 @@
'android_webview_build%': '<(android_webview_build)',
},
'conditions': [
- ['host_arch=="ia32" or host_arch=="x64" or clang==1', {
+ ['host_arch=="ia32" or host_arch=="x64" or \
+ host_arch=="ppc" or host_arch=="ppc64" or \
+ clang==1', {
'variables': {
'host_cxx_is_biarch%': 1,
},
@@ -101,6 +150,7 @@
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
+ target_arch=="ppc" or target_arch=="ppc64" or \
clang==1', {
'variables': {
'target_cxx_is_biarch%': 1,
@@ -250,6 +300,38 @@
'V8_TARGET_ARCH_ARM64',
],
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="ppc64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC64',
+ ],
+ }],
+ ['v8_host_byteorder=="little"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC_LE',
+ ],
+ }],
+ ['v8_host_byteorder=="big"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_PPC_BE',
+ ],
+ 'conditions': [
+ ['OS=="aix"', {
+ # Work around AIX ceil, trunc and round oddities.
+ 'cflags': [ '-mcpu=power5+ -mfprnd' ],
+ }],
+ ['OS=="aix"', {
+ # Work around AIX assembler popcntb bug.
+ 'cflags': [ '-mno-popcntb' ],
+ }],
+ ],
+ }],
+ ],
+ }], # ppc
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
@@ -287,7 +369,10 @@
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
- 'cflags': ['-EB'],
+ 'cflags': [
+ '-EB',
+ '-Wno-error=array-bounds', # Workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56273
+ ],
'ldflags': ['-EB'],
'conditions': [
[ 'v8_use_mips_abi_hardfloat=="true"', {
@@ -471,7 +556,10 @@
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
- 'cflags': ['-EL'],
+ 'cflags': [
+ '-EL',
+ '-Wno-error=array-bounds', # Workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56273
+ ],
'ldflags': ['-EL'],
'conditions': [
[ 'v8_use_mips_abi_hardfloat=="true"', {
@@ -671,7 +759,10 @@
['_toolset=="target"', {
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
- 'cflags': ['-EL'],
+ 'cflags': [
+ '-EL',
+ '-Wno-error=array-bounds', # Workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56273
+ ],
'ldflags': ['-EL'],
'conditions': [
[ 'v8_use_mips_abi_hardfloat=="true"', {
@@ -755,6 +846,26 @@
'-mx32',
],
}], # v8_target_arch=="x32"
+ ['linux_use_gold_flags==1', {
+ # Newer gccs and clangs support -fuse-ld, use the flag to force gold
+ # selection.
+ # gcc -- http://gcc.gnu.org/onlinedocs/gcc-4.8.0/gcc/Optimize-Options.html
+ 'ldflags': [ '-fuse-ld=gold', ],
+ }],
+ ['linux_use_bundled_binutils==1', {
+ 'cflags': [
+ '-B<!(cd <(DEPTH) && pwd -P)/<(binutils_dir)',
+ ],
+ }],
+ ['linux_use_bundled_gold==1', {
+ # Put our binutils, which contains gold in the search path. We pass
+ # the path to gold to the compiler. gyp leaves unspecified what the
+ # cwd is when running the compiler, so the normal gyp path-munging
+ # fails us. This hack gets the right path.
+ 'ldflags': [
+ '-B<!(cd <(DEPTH) && pwd -P)/<(binutils_dir)',
+ ],
+ }],
['OS=="win"', {
'defines': [
'WIN32',
@@ -783,11 +894,20 @@
},
},
}],
- ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+ ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+ or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
+ v8_target_arch=="ia32"', {
+ 'cflags': [
+ '-msse2',
+ '-mfpmath=sse',
+ '-mmmx', # Allows mmintrin.h for MMX intrinsics.
+ ],
+ }],
+ ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
- v8_target_arch=="mipsel")', {
+ v8_target_arch=="mipsel" or v8_target_arch=="ppc")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
@@ -820,7 +940,8 @@
],
}],
['(OS=="linux" or OS=="android") and \
- (v8_target_arch=="x64" or v8_target_arch=="arm64")', {
+ (v8_target_arch=="x64" or v8_target_arch=="arm64" or \
+ v8_target_arch=="ppc64")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
@@ -847,7 +968,7 @@
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
- or OS=="netbsd" or OS=="qnx"', {
+ or OS=="netbsd" or OS=="qnx" or OS=="aix"', {
'conditions': [
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
@@ -863,6 +984,21 @@
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
+ ['OS=="aix"', {
+ 'defines': [
+ # Support for malloc(0)
+ '_LINUX_SOURCE_COMPAT=1',
+ '_ALL_SOURCE=1'],
+ 'conditions': [
+ [ 'v8_target_arch=="ppc"', {
+ 'ldflags': [ '-Wl,-bmaxdata:0x60000000/dsa' ],
+ }],
+ [ 'v8_target_arch=="ppc64"', {
+ 'cflags': [ '-maix64' ],
+ 'ldflags': [ '-maix64' ],
+ }],
+ ],
+ }],
], # conditions
'configurations': {
# Abstract configuration for v8_optimized_debug == 0.
@@ -883,9 +1019,12 @@
'LinkIncremental': '2',
},
},
+ 'variables': {
+ 'v8_enable_slow_dchecks%': 1,
+ },
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
- OS=="qnx"', {
+ OS=="qnx" or OS=="aix"', {
'cflags!': [
'-O3',
'-O2',
@@ -902,9 +1041,11 @@
'GCC_OPTIMIZATION_LEVEL': '0', # -O0
},
}],
- ],
- 'defines': [
- 'ENABLE_SLOW_DCHECKS',
+ ['v8_enable_slow_dchecks==1', {
+ 'defines': [
+ 'ENABLE_SLOW_DCHECKS',
+ ],
+ }],
],
}, # DebugBase0
# Abstract configuration for v8_optimized_debug == 1.
@@ -912,55 +1053,6 @@
'abstract': 1,
'msvs_settings': {
'VCCLCompilerTool': {
- 'Optimization': '1',
- 'InlineFunctionExpansion': '2',
- 'EnableIntrinsicFunctions': 'true',
- 'FavorSizeOrSpeed': '0',
- 'StringPooling': 'true',
- 'BasicRuntimeChecks': '0',
- 'conditions': [
- ['component=="shared_library"', {
- 'RuntimeLibrary': '3', # /MDd
- }, {
- 'RuntimeLibrary': '1', # /MTd
- }],
- ],
- },
- 'VCLinkerTool': {
- 'LinkIncremental': '2',
- },
- },
- 'defines': [
- 'ENABLE_SLOW_DCHECKS',
- ],
- 'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
- OS=="qnx"', {
- 'cflags!': [
- '-O0',
- '-O3', # TODO(2807) should be -O1.
- '-O2',
- '-Os',
- ],
- 'cflags': [
- '-fdata-sections',
- '-ffunction-sections',
- '-O1', # TODO(2807) should be -O3.
- ],
- }],
- ['OS=="mac"', {
- 'xcode_settings': {
- 'GCC_OPTIMIZATION_LEVEL': '3', # -O3
- 'GCC_STRICT_ALIASING': 'YES',
- },
- }],
- ],
- }, # DebugBase1
- # Abstract configuration for v8_optimized_debug == 2.
- 'DebugBase2': {
- 'abstract': 1,
- 'msvs_settings': {
- 'VCCLCompilerTool': {
'Optimization': '2',
'InlineFunctionExpansion': '2',
'EnableIntrinsicFunctions': 'true',
@@ -981,9 +1073,12 @@
'EnableCOMDATFolding': '2',
},
},
+ 'variables': {
+ 'v8_enable_slow_dchecks%': 0,
+ },
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
- OS=="qnx"', {
+ OS=="qnx" or OS=="aix"', {
'cflags!': [
'-O0',
'-O1',
@@ -995,7 +1090,9 @@
],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
- ['nacl_target_arch=="none"', {
+ # Don't use -O3 with sanitizers.
+ ['nacl_target_arch=="none" and asan==0 and msan==0 and lsan==0 \
+ and tsan==0 and ubsan==0 and ubsan_vptr==0', {
'cflags': ['-O3'],
'cflags!': ['-O2'],
}, {
@@ -1010,8 +1107,13 @@
'GCC_STRICT_ALIASING': 'YES',
},
}],
+ ['v8_enable_slow_dchecks==1', {
+ 'defines': [
+ 'ENABLE_SLOW_DCHECKS',
+ ],
+ }],
],
- }, # DebugBase2
+ }, # DebugBase1
# Common settings for the Debug configuration.
'DebugBaseCommon': {
'abstract': 1,
@@ -1025,13 +1127,16 @@
],
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
- OS=="qnx"', {
+ OS=="qnx" or OS=="aix"', {
'cflags': [ '-Woverloaded-virtual', '<(wno_array_bounds)', ],
}],
['OS=="linux" and v8_enable_backtrace==1', {
# Support for backtrace_symbols.
'ldflags': [ '-rdynamic' ],
}],
+ ['OS=="aix"', {
+ 'ldflags': [ '-Wl,-bbigtoc' ],
+ }],
['OS=="android"', {
'variables': {
'android_full_debug%': 1,
@@ -1047,6 +1152,21 @@
}],
],
}],
+ ['linux_use_gold_flags==1', {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'ldflags': [
+ # Experimentation found that using four linking threads
+ # saved ~20% of link time.
+ # https://groups.google.com/a/chromium.org/group/chromium-dev/browse_thread/thread/281527606915bb36
+ # Only apply this to the target linker, since the host
+ # linker might not be gold, but isn't used much anyway.
+ '-Wl,--threads',
+ '-Wl,--thread-count=4',
+ ],
+ }],
+ ],
+ }],
],
}, # DebugBaseCommon
'Debug': {
@@ -1054,18 +1174,18 @@
'conditions': [
['v8_optimized_debug==0', {
'inherit_from': ['DebugBase0'],
- }],
- ['v8_optimized_debug==1', {
+ }, {
'inherit_from': ['DebugBase1'],
}],
- ['v8_optimized_debug==2', {
- 'inherit_from': ['DebugBase2'],
- }],
],
}, # Debug
'Release': {
+ 'variables': {
+ 'v8_enable_slow_dchecks%': 0,
+ },
'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
+ or OS=="aix"', {
'cflags!': [
'-Os',
],
@@ -1076,7 +1196,9 @@
],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
- ['nacl_target_arch=="none"', {
+ # Don't use -O3 with sanitizers.
+ ['nacl_target_arch=="none" and asan==0 and msan==0 and lsan==0 \
+ and tsan==0 and ubsan==0 and ubsan_vptr==0', {
'cflags': ['-O3'],
'cflags!': ['-O2'],
}, {
@@ -1130,6 +1252,11 @@
},
},
}], # OS=="win"
+ ['v8_enable_slow_dchecks==1', {
+ 'defines': [
+ 'ENABLE_SLOW_DCHECKS',
+ ],
+ }],
], # conditions
}, # Release
}, # configurations
diff --git a/deps/v8/codereview.settings b/deps/v8/codereview.settings
index a7ee88e361..7c4dd8e255 100644
--- a/deps/v8/codereview.settings
+++ b/deps/v8/codereview.settings
@@ -4,6 +4,5 @@ VIEW_VC: https://chromium.googlesource.com/v8/v8/+/
STATUS: http://v8-status.appspot.com/status
TRY_ON_UPLOAD: False
TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
-TRYSERVER_ROOT: v8
PROJECT: v8
PENDING_REF_PREFIX: refs/pending/
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 1eaf1ab68f..ca36b6c58b 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -105,6 +105,43 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
};
+template <typename K, typename V>
+class DefaultPhantomPersistentValueMapTraits : public StdMapTraits<K, V> {
+ private:
+ template <typename T>
+ struct RemovePointer;
+
+ public:
+ // Weak callback & friends:
+ static const PersistentContainerCallbackType kCallbackType = kNotWeak;
+ typedef PersistentValueMap<
+ K, V, DefaultPhantomPersistentValueMapTraits<K, V> > MapType;
+ typedef void PhantomCallbackDataType;
+
+ static PhantomCallbackDataType* PhantomCallbackParameter(MapType* map,
+ const K& key,
+ Local<V> value) {
+ return NULL;
+ }
+ static MapType* MapFromPhantomCallbackData(
+ const PhantomCallbackData<PhantomCallbackDataType>& data) {
+ return NULL;
+ }
+ static K KeyFromPhantomCallbackData(
+ const PhantomCallbackData<PhantomCallbackDataType>& data) {
+ return K();
+ }
+ static void DisposeCallbackData(PhantomCallbackDataType* data) {}
+ static void Dispose(Isolate* isolate, UniquePersistent<V> value, K key) {}
+
+ private:
+ template <typename T>
+ struct RemovePointer<T*> {
+ typedef T Type;
+ };
+};
+
+
/**
* A map wrapper that allows using UniquePersistent as a mapped value.
* C++11 embedders don't need this class, as they can use UniquePersistent
@@ -115,13 +152,9 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
* PersistentContainerValue, with all conversion into and out of V8
* handles being transparently handled by this class.
*/
-template<typename K, typename V, typename Traits>
-class PersistentValueMap {
+template <typename K, typename V, typename Traits>
+class PersistentValueMapBase {
public:
- explicit PersistentValueMap(Isolate* isolate) : isolate_(isolate) {}
-
- ~PersistentValueMap() { Clear(); }
-
Isolate* GetIsolate() { return isolate_; }
/**
@@ -168,23 +201,6 @@ class PersistentValueMap {
}
/**
- * Put value into map. Depending on Traits::kIsWeak, the value will be held
- * by the map strongly or weakly.
- * Returns old value as UniquePersistent.
- */
- UniquePersistent<V> Set(const K& key, Local<V> value) {
- UniquePersistent<V> persistent(isolate_, value);
- return SetUnique(key, &persistent);
- }
-
- /**
- * Put value into map, like Set(const K&, Local<V>).
- */
- UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
- return SetUnique(key, &value);
- }
-
- /**
* Return value for key and remove it from the map.
*/
UniquePersistent<V> Remove(const K& key) {
@@ -237,7 +253,9 @@ class PersistentValueMap {
}
private:
- friend class PersistentValueMap;
+ friend class PersistentValueMapBase;
+ friend class PersistentValueMap<K, V, Traits>;
+ friend class PhantomPersistentValueMap<K, V, Traits>;
explicit PersistentValueReference(PersistentContainerValue value)
: value_(value) { }
@@ -263,19 +281,89 @@ class PersistentValueMap {
return PersistentValueReference(Traits::Get(&impl_, key));
}
+ protected:
+ explicit PersistentValueMapBase(Isolate* isolate) : isolate_(isolate) {}
+
+ ~PersistentValueMapBase() { Clear(); }
+
+ Isolate* isolate() { return isolate_; }
+ typename Traits::Impl* impl() { return &impl_; }
+
+ static V* FromVal(PersistentContainerValue v) {
+ return reinterpret_cast<V*>(v);
+ }
+
+ static PersistentContainerValue ClearAndLeak(
+ UniquePersistent<V>* persistent) {
+ V* v = persistent->val_;
+ persistent->val_ = 0;
+ return reinterpret_cast<PersistentContainerValue>(v);
+ }
+
+ static PersistentContainerValue Leak(UniquePersistent<V>* persistent) {
+ return reinterpret_cast<PersistentContainerValue>(persistent->val_);
+ }
+
/**
- * Put a value into the map and update the reference.
- * Restrictions of GetReference apply here as well.
+ * Return a container value as UniquePersistent and make sure the weak
+ * callback is properly disposed of. All remove functionality should go
+ * through this.
*/
- UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
- PersistentValueReference* reference) {
- *reference = Leak(&value);
- return SetUnique(key, &value);
+ static UniquePersistent<V> Release(PersistentContainerValue v) {
+ UniquePersistent<V> p;
+ p.val_ = FromVal(v);
+ if (Traits::kCallbackType != kNotWeak && p.IsWeak()) {
+ Traits::DisposeCallbackData(
+ p.template ClearWeak<typename Traits::WeakCallbackDataType>());
+ }
+ return p.Pass();
}
private:
- PersistentValueMap(PersistentValueMap&);
- void operator=(PersistentValueMap&);
+ PersistentValueMapBase(PersistentValueMapBase&);
+ void operator=(PersistentValueMapBase&);
+
+ static bool SetReturnValueFromVal(ReturnValue<Value>* returnValue,
+ PersistentContainerValue value) {
+ bool hasValue = value != kPersistentContainerNotFound;
+ if (hasValue) {
+ returnValue->SetInternal(
+ *reinterpret_cast<internal::Object**>(FromVal(value)));
+ }
+ return hasValue;
+ }
+
+ Isolate* isolate_;
+ typename Traits::Impl impl_;
+};
+
+
+template <typename K, typename V, typename Traits>
+class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
+ public:
+ explicit PersistentValueMap(Isolate* isolate)
+ : PersistentValueMapBase<K, V, Traits>(isolate) {}
+
+ typedef
+ typename PersistentValueMapBase<K, V, Traits>::PersistentValueReference
+ PersistentValueReference;
+
+ /**
+ * Put value into map. Depending on Traits::kIsWeak, the value will be held
+ * by the map strongly or weakly.
+ * Returns old value as UniquePersistent.
+ */
+ UniquePersistent<V> Set(const K& key, Local<V> value) {
+ UniquePersistent<V> persistent(this->isolate(), value);
+ return SetUnique(key, &persistent);
+ }
+
+ /**
+ * Put value into map, like Set(const K&, Local<V>).
+ */
+ UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
+ return SetUnique(key, &value);
+ }
/**
* Put the value into the map, and set the 'weak' callback when demanded
@@ -283,15 +371,26 @@ class PersistentValueMap {
*/
UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
if (Traits::kCallbackType != kNotWeak) {
- Local<V> value(Local<V>::New(isolate_, *persistent));
+ Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
Traits::WeakCallbackParameter(this, key, value), WeakCallback);
}
PersistentContainerValue old_value =
- Traits::Set(&impl_, key, ClearAndLeak(persistent));
- return Release(old_value).Pass();
+ Traits::Set(this->impl(), key, this->ClearAndLeak(persistent));
+ return this->Release(old_value).Pass();
+ }
+
+ /**
+ * Put a value into the map and update the reference.
+ * Restrictions of GetReference apply here as well.
+ */
+ UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
+ PersistentValueReference* reference) {
+ *reference = this->Leak(&value);
+ return SetUnique(key, &value);
}
+ private:
static void WeakCallback(
const WeakCallbackData<V, typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
@@ -303,50 +402,73 @@ class PersistentValueMap {
Traits::DisposeCallbackData(data.GetParameter());
}
}
+};
- static V* FromVal(PersistentContainerValue v) {
- return reinterpret_cast<V*>(v);
- }
- static bool SetReturnValueFromVal(
- ReturnValue<Value>* returnValue, PersistentContainerValue value) {
- bool hasValue = value != kPersistentContainerNotFound;
- if (hasValue) {
- returnValue->SetInternal(
- *reinterpret_cast<internal::Object**>(FromVal(value)));
- }
- return hasValue;
- }
+template <typename K, typename V, typename Traits>
+class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
+ public:
+ explicit PhantomPersistentValueMap(Isolate* isolate)
+ : PersistentValueMapBase<K, V, Traits>(isolate) {}
- static PersistentContainerValue ClearAndLeak(
- UniquePersistent<V>* persistent) {
- V* v = persistent->val_;
- persistent->val_ = 0;
- return reinterpret_cast<PersistentContainerValue>(v);
+ typedef
+ typename PersistentValueMapBase<K, V, Traits>::PersistentValueReference
+ PersistentValueReference;
+
+ /**
+ * Put value into map. Depending on Traits::kIsWeak, the value will be held
+ * by the map strongly or weakly.
+ * Returns old value as UniquePersistent.
+ */
+ UniquePersistent<V> Set(const K& key, Local<V> value) {
+ UniquePersistent<V> persistent(this->isolate(), value);
+ return SetUnique(key, &persistent);
}
- static PersistentContainerValue Leak(
- UniquePersistent<V>* persistent) {
- return reinterpret_cast<PersistentContainerValue>(persistent->val_);
+ /**
+ * Put value into map, like Set(const K&, Local<V>).
+ */
+ UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
+ return SetUnique(key, &value);
}
/**
- * Return a container value as UniquePersistent and make sure the weak
- * callback is properly disposed of. All remove functionality should go
- * through this.
+ * Put the value into the map, and set the 'weak' callback when demanded
+ * by the Traits class.
*/
- static UniquePersistent<V> Release(PersistentContainerValue v) {
- UniquePersistent<V> p;
- p.val_ = FromVal(v);
- if (Traits::kCallbackType != kNotWeak && p.IsWeak()) {
- Traits::DisposeCallbackData(
- p.template ClearWeak<typename Traits::WeakCallbackDataType>());
+ UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
+ if (Traits::kCallbackType != kNotWeak) {
+ Local<V> value(Local<V>::New(this->isolate(), *persistent));
+ persistent->template SetPhantom<typename Traits::WeakCallbackDataType>(
+ Traits::WeakCallbackParameter(this, key, value), WeakCallback, 0, 1);
}
- return p.Pass();
+ PersistentContainerValue old_value =
+ Traits::Set(this->impl(), key, this->ClearAndLeak(persistent));
+ return this->Release(old_value).Pass();
}
- Isolate* isolate_;
- typename Traits::Impl impl_;
+ /**
+ * Put a value into the map and update the reference.
+ * Restrictions of GetReference apply here as well.
+ */
+ UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
+ PersistentValueReference* reference) {
+ *reference = this->Leak(&value);
+ return SetUnique(key, &value);
+ }
+
+ private:
+ static void WeakCallback(
+ const PhantomCallbackData<typename Traits::WeakCallbackDataType>& data) {
+ if (Traits::kCallbackType != kNotWeak) {
+ PhantomPersistentValueMap<K, V, Traits>* persistentValueMap =
+ Traits::MapFromPhantomCallbackData(data);
+ K key = Traits::KeyFromPhantomCallbackData(data);
+ Traits::Dispose(data.GetIsolate(), persistentValueMap->Remove(key).Pass(),
+ key);
+ Traits::DisposeCallbackData(data.GetParameter());
+ }
+ }
};
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 61ee03872d..49f41f0507 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
-#define V8_MINOR_VERSION 1
-#define V8_BUILD_NUMBER 0
-#define V8_PATCH_LEVEL 27
+#define V8_MINOR_VERSION 2
+#define V8_BUILD_NUMBER 77
+#define V8_PATCH_LEVEL 13
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 218e919a16..adc85efbc8 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -73,7 +73,6 @@ class Context;
class CpuProfiler;
class Data;
class Date;
-class DeclaredAccessorDescriptor;
class External;
class Function;
class FunctionTemplate;
@@ -113,6 +112,10 @@ template<class T,
class M = NonCopyablePersistentTraits<T> > class Persistent;
template<class T> class UniquePersistent;
template<class K, class V, class T> class PersistentValueMap;
+template <class K, class V, class T>
+class PersistentValueMapBase;
+template <class K, class V, class T>
+class PhantomPersistentValueMap;
template<class V, class T> class PersistentValueVector;
template<class T, class P> class WeakCallbackObject;
class FunctionTemplate;
@@ -123,9 +126,6 @@ template<typename T> class PropertyCallbackInfo;
class StackTrace;
class StackFrame;
class Isolate;
-class DeclaredAccessorDescriptor;
-class ObjectOperationDescriptor;
-class RawOperationDescriptor;
class CallHandlerHelper;
class EscapableHandleScope;
template<typename T> class ReturnValue;
@@ -142,15 +142,18 @@ class PropertyCallbackArguments;
class FunctionCallbackArguments;
class GlobalHandles;
+template <typename T>
class CallbackData {
public:
V8_INLINE v8::Isolate* GetIsolate() const { return isolate_; }
- protected:
- explicit CallbackData(v8::Isolate* isolate) : isolate_(isolate) {}
+ explicit CallbackData(v8::Isolate* isolate, T* parameter)
+ : isolate_(isolate), parameter_(parameter) {}
+ V8_INLINE T* GetParameter() const { return parameter_; }
private:
v8::Isolate* isolate_;
+ T* parameter_;
};
}
@@ -403,7 +406,8 @@ template <class T> class Local : public Handle<T> {
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
friend class EscapableHandleScope;
- template<class F1, class F2, class F3> friend class PersistentValueMap;
+ template <class F1, class F2, class F3>
+ friend class PersistentValueMapBase;
template<class F1, class F2> friend class PersistentValueVector;
template <class S> V8_INLINE Local(S* that) : Handle<T>(that) { }
@@ -431,22 +435,27 @@ template <class T> class Eternal {
template <typename T>
-class PhantomCallbackData : public internal::CallbackData {
+class PhantomCallbackData : public internal::CallbackData<T> {
public:
typedef void (*Callback)(const PhantomCallbackData<T>& data);
- V8_INLINE T* GetParameter() const { return parameter_; }
+ V8_INLINE void* GetInternalField1() const { return internal_field1_; }
+ V8_INLINE void* GetInternalField2() const { return internal_field2_; }
- PhantomCallbackData<T>(Isolate* isolate, T* parameter)
- : internal::CallbackData(isolate), parameter_(parameter) {}
+ PhantomCallbackData(Isolate* isolate, T* parameter, void* internal_field1,
+ void* internal_field2)
+ : internal::CallbackData<T>(isolate, parameter),
+ internal_field1_(internal_field1),
+ internal_field2_(internal_field2) {}
private:
- T* parameter_;
+ void* internal_field1_;
+ void* internal_field2_;
};
template <class T, class P>
-class WeakCallbackData : public PhantomCallbackData<P> {
+class WeakCallbackData : public internal::CallbackData<P> {
public:
typedef void (*Callback)(const WeakCallbackData<T, P>& data);
@@ -455,29 +464,12 @@ class WeakCallbackData : public PhantomCallbackData<P> {
private:
friend class internal::GlobalHandles;
WeakCallbackData(Isolate* isolate, P* parameter, Local<T> handle)
- : PhantomCallbackData<P>(isolate, parameter), handle_(handle) {}
+ : internal::CallbackData<P>(isolate, parameter), handle_(handle) {}
Local<T> handle_;
};
-template <typename T, typename U>
-class InternalFieldsCallbackData : public internal::CallbackData {
- public:
- typedef void (*Callback)(const InternalFieldsCallbackData<T, U>& data);
-
- InternalFieldsCallbackData(Isolate* isolate, T* internalField1,
- U* internalField2)
- : internal::CallbackData(isolate),
- internal_field1_(internalField1),
- internal_field2_(internalField2) {}
-
- V8_INLINE T* GetInternalField1() const { return internal_field1_; }
- V8_INLINE U* GetInternalField2() const { return internal_field2_; }
-
- private:
- T* internal_field1_;
- U* internal_field2_;
-};
+static const int kNoInternalFieldIndex = -1;
/**
@@ -568,12 +560,9 @@ template <class T> class PersistentBase {
// fields in the dying object.
template <typename P>
V8_INLINE void SetPhantom(P* parameter,
- typename PhantomCallbackData<P>::Callback callback);
-
- template <typename P, typename Q>
- V8_INLINE void SetPhantom(
- void (*callback)(const InternalFieldsCallbackData<P, Q>&),
- int internal_field_index1, int internal_field_index2);
+ typename PhantomCallbackData<P>::Callback callback,
+ int internal_field_index1 = kNoInternalFieldIndex,
+ int internal_field_index2 = kNoInternalFieldIndex);
template<typename P>
V8_INLINE P* ClearWeak();
@@ -628,7 +617,8 @@ template <class T> class PersistentBase {
template<class F> friend class UniquePersistent;
template<class F> friend class PersistentBase;
template<class F> friend class ReturnValue;
- template<class F1, class F2, class F3> friend class PersistentValueMap;
+ template <class F1, class F2, class F3>
+ friend class PersistentValueMapBase;
template<class F1, class F2> friend class PersistentValueVector;
friend class Object;
@@ -987,21 +977,29 @@ class ScriptOrigin {
Handle<Integer> resource_line_offset = Handle<Integer>(),
Handle<Integer> resource_column_offset = Handle<Integer>(),
Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>(),
- Handle<Integer> script_id = Handle<Integer>())
+ Handle<Integer> script_id = Handle<Integer>(),
+ Handle<Boolean> resource_is_embedder_debug_script = Handle<Boolean>())
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset),
+ resource_is_embedder_debug_script_(resource_is_embedder_debug_script),
resource_is_shared_cross_origin_(resource_is_shared_cross_origin),
- script_id_(script_id) { }
+ script_id_(script_id) {}
V8_INLINE Handle<Value> ResourceName() const;
V8_INLINE Handle<Integer> ResourceLineOffset() const;
V8_INLINE Handle<Integer> ResourceColumnOffset() const;
+ /**
+ * Returns true for embedder's debugger scripts
+ */
+ V8_INLINE Handle<Boolean> ResourceIsEmbedderDebugScript() const;
V8_INLINE Handle<Boolean> ResourceIsSharedCrossOrigin() const;
V8_INLINE Handle<Integer> ScriptID() const;
+
private:
Handle<Value> resource_name_;
Handle<Integer> resource_line_offset_;
Handle<Integer> resource_column_offset_;
+ Handle<Boolean> resource_is_embedder_debug_script_;
Handle<Boolean> resource_is_shared_cross_origin_;
Handle<Integer> script_id_;
};
@@ -1165,6 +1163,7 @@ class V8_EXPORT ScriptCompiler {
Handle<Value> resource_name;
Handle<Integer> resource_line_offset;
Handle<Integer> resource_column_offset;
+ Handle<Boolean> resource_is_embedder_debug_script;
Handle<Boolean> resource_is_shared_cross_origin;
// Cached data from previous compilation (if a kConsume*Cache flag is
@@ -1328,6 +1327,39 @@ class V8_EXPORT ScriptCompiler {
* compared when it is being used.
*/
static uint32_t CachedDataVersionTag();
+
+ /**
+ * Compile an ES6 module.
+ *
+ * This is an experimental feature.
+ *
+ * TODO(adamk): Script is likely the wrong return value for this;
+ * should return some new Module type.
+ */
+ static Local<Script> CompileModule(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions);
+
+ /**
+ * Compile a function for a given context. This is equivalent to running
+ *
+ * with (obj) {
+ * return function(args) { ... }
+ * }
+ *
+ * It is possible to specify multiple context extensions (obj in the above
+ * example).
+ */
+ static Local<Function> CompileFunctionInContext(
+ Isolate* isolate, Source* source, Local<Context> context,
+ size_t arguments_count, Local<String> arguments[],
+ size_t context_extension_count, Local<Object> context_extensions[]);
+
+ private:
+ static Local<UnboundScript> CompileUnboundInternal(Isolate* isolate,
+ Source* source,
+ CompileOptions options,
+ bool is_module);
};
@@ -1556,6 +1588,21 @@ class V8_EXPORT JSON {
};
+/**
+ * A map whose keys are referenced weakly. It is similar to JavaScript WeakMap
+ * but can be created without entering a v8::Context and hence shouldn't
+ * escape to JavaScript.
+ */
+class V8_EXPORT NativeWeakMap : public Data {
+ public:
+ static Local<NativeWeakMap> New(Isolate* isolate);
+ void Set(Handle<Value> key, Handle<Value> value);
+ Local<Value> Get(Handle<Value> key);
+ bool Has(Handle<Value> key);
+ bool Delete(Handle<Value> key);
+};
+
+
// --- Value ---
@@ -2103,15 +2150,12 @@ class V8_EXPORT String : public Name {
V8_INLINE static String* Cast(v8::Value* obj);
- enum NewStringType {
- kNormalString, kInternalizedString, kUndetectableString
- };
+ enum NewStringType { kNormalString, kInternalizedString };
/** Allocates a new string from UTF-8 data.*/
- static Local<String> NewFromUtf8(Isolate* isolate,
- const char* data,
- NewStringType type = kNormalString,
- int length = -1);
+ static Local<String> NewFromUtf8(Isolate* isolate, const char* data,
+ NewStringType type = kNormalString,
+ int length = -1);
/** Allocates a new string from Latin-1 data.*/
static Local<String> NewFromOneByte(
@@ -2463,10 +2507,6 @@ class V8_EXPORT Object : public Value {
bool Delete(Handle<Value> key);
- // Delete a property on this object bypassing interceptors and
- // ignoring dont-delete attributes.
- bool ForceDelete(Handle<Value> key);
-
bool Has(uint32_t index);
bool Delete(uint32_t index);
@@ -2484,12 +2524,6 @@ class V8_EXPORT Object : public Value {
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
- // This function is not yet stable and should not be used at this time.
- bool SetDeclaredAccessor(Local<Name> name,
- Local<DeclaredAccessorDescriptor> descriptor,
- PropertyAttribute attribute = None,
- AccessControl settings = DEFAULT);
-
void SetAccessorProperty(Local<Name> name,
Local<Function> getter,
Handle<Function> setter = Handle<Function>(),
@@ -2557,8 +2591,6 @@ class V8_EXPORT Object : public Value {
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
- static const int kNoInternalFieldIndex = -1;
-
/** Same as above, but works for Persistents */
V8_INLINE static int InternalFieldCount(
const PersistentBase<Object>& object) {
@@ -2775,7 +2807,8 @@ class ReturnValue {
template<class F> friend class ReturnValue;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
- template<class F, class G, class H> friend class PersistentValueMap;
+ template <class F, class G, class H>
+ friend class PersistentValueMapBase;
V8_INLINE void SetInternal(internal::Object* value) { *value_ = value; }
V8_INLINE internal::Object* GetDefaultValue();
V8_INLINE explicit ReturnValue(internal::Object** slot);
@@ -2822,7 +2855,7 @@ class FunctionCallbackInfo {
internal::Object** implicit_args_;
internal::Object** values_;
int length_;
- bool is_construct_call_;
+ int is_construct_call_;
};
@@ -3558,14 +3591,6 @@ class V8_EXPORT Template : public Data {
Local<AccessorSignature>(),
AccessControl settings = DEFAULT);
- // This function is not yet stable and should not be used at this time.
- bool SetDeclaredAccessor(Local<Name> name,
- Local<DeclaredAccessorDescriptor> descriptor,
- PropertyAttribute attribute = None,
- Local<AccessorSignature> signature =
- Local<AccessorSignature>(),
- AccessControl settings = DEFAULT);
-
private:
Template();
@@ -3752,6 +3777,9 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host,
* temporary functions that can be collected using Scripts is
* preferred.
*
+ * Any modification of a FunctionTemplate after first instantiation will trigger
+ *a crash.
+ *
* A FunctionTemplate can have properties, these properties are added to the
* function object when it is created.
*
@@ -3918,6 +3946,9 @@ class V8_EXPORT FunctionTemplate : public Template {
};
+enum class PropertyHandlerFlags { kNone = 0, kAllCanRead = 1 };
+
+
struct NamedPropertyHandlerConfiguration {
NamedPropertyHandlerConfiguration(
/** Note: getter is required **/
@@ -3926,13 +3957,15 @@ struct NamedPropertyHandlerConfiguration {
GenericNamedPropertyQueryCallback query = 0,
GenericNamedPropertyDeleterCallback deleter = 0,
GenericNamedPropertyEnumeratorCallback enumerator = 0,
- Handle<Value> data = Handle<Value>())
+ Handle<Value> data = Handle<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
setter(setter),
query(query),
deleter(deleter),
enumerator(enumerator),
- data(data) {}
+ data(data),
+ flags(flags) {}
GenericNamedPropertyGetterCallback getter;
GenericNamedPropertySetterCallback setter;
@@ -3940,6 +3973,7 @@ struct NamedPropertyHandlerConfiguration {
GenericNamedPropertyDeleterCallback deleter;
GenericNamedPropertyEnumeratorCallback enumerator;
Handle<Value> data;
+ PropertyHandlerFlags flags;
};
@@ -3951,13 +3985,15 @@ struct IndexedPropertyHandlerConfiguration {
IndexedPropertyQueryCallback query = 0,
IndexedPropertyDeleterCallback deleter = 0,
IndexedPropertyEnumeratorCallback enumerator = 0,
- Handle<Value> data = Handle<Value>())
+ Handle<Value> data = Handle<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
setter(setter),
query(query),
deleter(deleter),
enumerator(enumerator),
- data(data) {}
+ data(data),
+ flags(flags) {}
IndexedPropertyGetterCallback getter;
IndexedPropertySetterCallback setter;
@@ -3965,6 +4001,7 @@ struct IndexedPropertyHandlerConfiguration {
IndexedPropertyDeleterCallback deleter;
IndexedPropertyEnumeratorCallback enumerator;
Handle<Value> data;
+ PropertyHandlerFlags flags;
};
@@ -4144,16 +4181,13 @@ class V8_EXPORT ObjectTemplate : public Template {
/**
- * A Signature specifies which receivers and arguments are valid
- * parameters to a function.
+ * A Signature specifies which receiver is valid for a function.
*/
class V8_EXPORT Signature : public Data {
public:
- static Local<Signature> New(Isolate* isolate,
- Handle<FunctionTemplate> receiver =
- Handle<FunctionTemplate>(),
- int argc = 0,
- Handle<FunctionTemplate> argv[] = 0);
+ static Local<Signature> New(
+ Isolate* isolate,
+ Handle<FunctionTemplate> receiver = Handle<FunctionTemplate>());
private:
Signature();
@@ -4175,61 +4209,6 @@ class V8_EXPORT AccessorSignature : public Data {
};
-class V8_EXPORT DeclaredAccessorDescriptor : public Data {
- private:
- DeclaredAccessorDescriptor();
-};
-
-
-class V8_EXPORT ObjectOperationDescriptor : public Data {
- public:
- // This function is not yet stable and should not be used at this time.
- static Local<RawOperationDescriptor> NewInternalFieldDereference(
- Isolate* isolate,
- int internal_field);
- private:
- ObjectOperationDescriptor();
-};
-
-
-enum DeclaredAccessorDescriptorDataType {
- kDescriptorBoolType,
- kDescriptorInt8Type, kDescriptorUint8Type,
- kDescriptorInt16Type, kDescriptorUint16Type,
- kDescriptorInt32Type, kDescriptorUint32Type,
- kDescriptorFloatType, kDescriptorDoubleType
-};
-
-
-class V8_EXPORT RawOperationDescriptor : public Data {
- public:
- Local<DeclaredAccessorDescriptor> NewHandleDereference(Isolate* isolate);
- Local<RawOperationDescriptor> NewRawDereference(Isolate* isolate);
- Local<RawOperationDescriptor> NewRawShift(Isolate* isolate,
- int16_t byte_offset);
- Local<DeclaredAccessorDescriptor> NewPointerCompare(Isolate* isolate,
- void* compare_value);
- Local<DeclaredAccessorDescriptor> NewPrimitiveValue(
- Isolate* isolate,
- DeclaredAccessorDescriptorDataType data_type,
- uint8_t bool_offset = 0);
- Local<DeclaredAccessorDescriptor> NewBitmaskCompare8(Isolate* isolate,
- uint8_t bitmask,
- uint8_t compare_value);
- Local<DeclaredAccessorDescriptor> NewBitmaskCompare16(
- Isolate* isolate,
- uint16_t bitmask,
- uint16_t compare_value);
- Local<DeclaredAccessorDescriptor> NewBitmaskCompare32(
- Isolate* isolate,
- uint32_t bitmask,
- uint32_t compare_value);
-
- private:
- RawOperationDescriptor();
-};
-
-
/**
* A utility for determining the type of objects based on the template
* they were constructed from.
@@ -5335,7 +5314,8 @@ class V8_EXPORT Isolate {
void VisitHandlesForPartialDependence(PersistentHandleVisitor* visitor);
private:
- template<class K, class V, class Traits> friend class PersistentValueMap;
+ template <class K, class V, class Traits>
+ friend class PersistentValueMapBase;
Isolate();
Isolate(const Isolate&);
@@ -5436,7 +5416,7 @@ class V8_EXPORT V8 {
* Returns { NULL, 0 } on failure.
* The caller owns the data array in the return value.
*/
- static StartupData CreateSnapshotDataBlob();
+ static StartupData CreateSnapshotDataBlob(char* custom_source = NULL);
/**
* Adds a message listener.
@@ -5688,12 +5668,11 @@ class V8_EXPORT V8 {
static void MakeWeak(internal::Object** global_handle, void* data,
WeakCallback weak_callback);
static void MakePhantom(internal::Object** global_handle, void* data,
+ // Must be 0 or kNoInternalFieldIndex.
+ int internal_field_index1,
+ // Must be 1 or kNoInternalFieldIndex.
+ int internal_field_index2,
PhantomCallbackData<void>::Callback weak_callback);
- static void MakePhantom(
- internal::Object** global_handle,
- InternalFieldsCallbackData<void, void>::Callback weak_callback,
- int internal_field_index1,
- int internal_field_index2 = Object::kNoInternalFieldIndex);
static void* ClearWeak(internal::Object** global_handle);
static void Eternalize(Isolate* isolate,
Value* handle,
@@ -6276,7 +6255,7 @@ class Internals {
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 76;
+ static const int kContextEmbedderDataIndex = 74;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -6294,7 +6273,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 154;
+ static const int kEmptyStringRootIndex = 155;
// The external allocation limit should be below 256 MB on all architectures
// to avoid that resource-constrained embedders run low on memory.
@@ -6581,26 +6560,16 @@ void PersistentBase<T>::SetWeak(
template <class T>
template <typename P>
void PersistentBase<T>::SetPhantom(
- P* parameter, typename PhantomCallbackData<P>::Callback callback) {
+ P* parameter, typename PhantomCallbackData<P>::Callback callback,
+ int internal_field_index1, int internal_field_index2) {
typedef typename PhantomCallbackData<void>::Callback Callback;
V8::MakePhantom(reinterpret_cast<internal::Object**>(this->val_), parameter,
+ internal_field_index1, internal_field_index2,
reinterpret_cast<Callback>(callback));
}
template <class T>
-template <typename U, typename V>
-void PersistentBase<T>::SetPhantom(
- void (*callback)(const InternalFieldsCallbackData<U, V>&),
- int internal_field_index1, int internal_field_index2) {
- typedef typename InternalFieldsCallbackData<void, void>::Callback Callback;
- V8::MakePhantom(reinterpret_cast<internal::Object**>(this->val_),
- reinterpret_cast<Callback>(callback), internal_field_index1,
- internal_field_index2);
-}
-
-
-template <class T>
template <typename P>
P* PersistentBase<T>::ClearWeak() {
return reinterpret_cast<P*>(
@@ -6814,7 +6783,7 @@ ReturnValue<T> FunctionCallbackInfo<T>::GetReturnValue() const {
template<typename T>
bool FunctionCallbackInfo<T>::IsConstructCall() const {
- return is_construct_call_;
+ return is_construct_call_ & 0x1;
}
@@ -6839,6 +6808,11 @@ Handle<Integer> ScriptOrigin::ResourceColumnOffset() const {
}
+Handle<Boolean> ScriptOrigin::ResourceIsEmbedderDebugScript() const {
+ return resource_is_embedder_debug_script_;
+}
+
+
Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
return resource_is_shared_cross_origin_;
}
@@ -6855,6 +6829,7 @@ ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
resource_name(origin.ResourceName()),
resource_line_offset(origin.ResourceLineOffset()),
resource_column_offset(origin.ResourceColumnOffset()),
+ resource_is_embedder_debug_script(origin.ResourceIsEmbedderDebugScript()),
resource_is_shared_cross_origin(origin.ResourceIsSharedCrossOrigin()),
cached_data(data) {}
@@ -7402,9 +7377,8 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
amount - *amount_of_external_allocated_memory_at_last_global_gc >
I::kExternalAllocationLimit) {
CollectAllGarbage("external memory allocation limit reached.");
- } else {
- *amount_of_external_allocated_memory = amount;
}
+ *amount_of_external_allocated_memory = amount;
return *amount_of_external_allocated_memory;
}
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index d1ca22c229..f9c3ffde17 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -67,6 +67,7 @@
// V8_OS_POSIX - POSIX compatible (mostly everything except Windows)
// V8_OS_QNX - QNX Neutrino
// V8_OS_SOLARIS - Sun Solaris and OpenSolaris
+// V8_OS_AIX - AIX
// V8_OS_WIN - Microsoft Windows
#if defined(__ANDROID__)
@@ -89,6 +90,9 @@
#elif defined(__sun)
# define V8_OS_POSIX 1
# define V8_OS_SOLARIS 1
+#elif defined(_AIX)
+#define V8_OS_POSIX 1
+#define V8_OS_AIX 1
#elif defined(__FreeBSD__)
# define V8_OS_BSD 1
# define V8_OS_FREEBSD 1
@@ -181,6 +185,7 @@
// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
+// V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported
// V8_HAS___FINAL - __final supported in non-C++11 mode
// V8_HAS___FORCEINLINE - __forceinline supported
//
@@ -285,6 +290,7 @@
# define V8_HAS_DECLSPEC_ALIGN 1
# define V8_HAS_DECLSPEC_DEPRECATED 1
# define V8_HAS_DECLSPEC_NOINLINE 1
+# define V8_HAS_DECLSPEC_SELECTANY 1
# define V8_HAS___FORCEINLINE 1
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 1a08afffdc..0ed742d3e3 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -98,6 +98,7 @@ int main(int argc, char* argv[]) {
result = RunMain(isolate, argc, argv);
if (run_shell) RunShell(context);
}
+ isolate->Dispose();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 662a9e126b..e08f86fc22 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -6,7 +6,6 @@
#include "src/accessors.h"
#include "src/api.h"
-#include "src/compiler.h"
#include "src/contexts.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
@@ -70,20 +69,10 @@ static V8_INLINE bool CheckForName(Handle<Name> name,
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
-template <class T>
-bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
- Handle<Name> name,
+bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset) {
Isolate* isolate = name->GetIsolate();
- if (type->Is(T::String())) {
- return CheckForName(name, isolate->factory()->length_string(),
- String::kLengthOffset, object_offset);
- }
-
- if (!type->IsClass()) return false;
- Handle<Map> map = type->AsClass()->Map();
-
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return
@@ -108,23 +97,16 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
CheckForName(name, isolate->factory()->byte_offset_string(),
JSDataView::kByteOffsetOffset, object_offset);
default:
+ if (map->instance_type() < FIRST_NONSTRING_TYPE) {
+ return CheckForName(name, isolate->factory()->length_string(),
+ String::kLengthOffset, object_offset);
+ }
+
return false;
}
}
-template
-bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type,
- Handle<Name> name,
- int* object_offset);
-
-
-template
-bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
- Handle<Name> name,
- int* object_offset);
-
-
bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
v8::Local<v8::Name> name, Handle<Object> value) {
@@ -136,6 +118,12 @@ bool SetPropertyOnInstanceIfInherited(
// This behaves sloppy since we lost the actual strict-mode.
// TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data
// properties.
+ if (object->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate, object);
+ if (iter.IsAtEnd()) return true;
+ DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+ object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ }
if (!object->map()->is_extensible()) return true;
JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name),
value, NONE).Check();
@@ -787,6 +775,40 @@ Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
//
+// Accessors::ScriptIsEmbedderDebugScript
+//
+
+
+void Accessors::ScriptIsEmbedderDebugScriptGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ DisallowHeapAllocation no_allocation;
+ HandleScope scope(isolate);
+ Object* object = *Utils::OpenHandle(*info.This());
+ bool is_embedder_debug_script =
+ Script::cast(JSValue::cast(object)->value())->is_embedder_debug_script();
+ Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
+ info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
+}
+
+
+void Accessors::ScriptIsEmbedderDebugScriptSetter(
+ v8::Local<v8::Name> name, v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptIsEmbedderDebugScriptInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ Handle<String> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("is_debugger_script")));
+ return MakeAccessor(isolate, name, &ScriptIsEmbedderDebugScriptGetter,
+ &ScriptIsEmbedderDebugScriptSetter, attributes);
+}
+
+
+//
// Accessors::ScriptGetContextData
//
@@ -1146,9 +1168,8 @@ static Handle<Object> ArgumentsForInlinedFunction(
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
SlotRefValueBuilder slot_refs(
- frame,
- inlined_frame_index,
- inlined_function->shared()->formal_parameter_count());
+ frame, inlined_frame_index,
+ inlined_function->shared()->internal_formal_parameter_count());
int args_count = slot_refs.args_length();
Handle<JSObject> arguments =
@@ -1372,7 +1393,7 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
- if (caller->shared()->strict_mode() == STRICT) {
+ if (is_strict(caller->shared()->language_mode())) {
return MaybeHandle<JSFunction>();
}
// Don't return caller from another security context.
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 0210d535e5..e4ad691f73 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -36,6 +36,7 @@ namespace internal {
V(ScriptType) \
V(ScriptSourceUrl) \
V(ScriptSourceMappingUrl) \
+ V(ScriptIsEmbedderDebugScript) \
V(StringLength)
// Accessors contains all predefined proxy accessors.
@@ -77,9 +78,7 @@ class Accessors : public AllStatic {
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
- template <class T>
- static bool IsJSObjectFieldAccessor(typename T::TypeHandle type,
- Handle<Name> name,
+ static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset);
static Handle<AccessorInfo> MakeAccessor(
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
index 7534ffb82f..1ad86b88ff 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/allocation-tracker.cc
@@ -227,9 +227,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
// Mark the new block as FreeSpace to make sure the heap is iterable
// while we are capturing stack trace.
- FreeListNode::FromAddress(addr)->set_size(heap, size);
- DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size);
- DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+ heap->CreateFillerObjectAt(addr, size);
Isolate* isolate = heap->isolate();
int length = 0;
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 96fd71f11b..8a03a9cf91 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
+#include "src/v8.h"
#if V8_LIBC_BIONIC
#include <malloc.h> // NOLINT
@@ -20,7 +21,7 @@ namespace internal {
void* Malloced::New(size_t size) {
void* result = malloc(size);
if (result == NULL) {
- v8::internal::FatalProcessOutOfMemory("Malloced operator new");
+ V8::FatalProcessOutOfMemory("Malloced operator new");
}
return result;
}
@@ -31,11 +32,6 @@ void Malloced::Delete(void* p) {
}
-void Malloced::FatalProcessOutOfMemory() {
- v8::internal::FatalProcessOutOfMemory("Out of memory");
-}
-
-
#ifdef DEBUG
static void* invalid = static_cast<void*>(NULL);
@@ -96,7 +92,7 @@ void* AlignedAlloc(size_t size, size_t alignment) {
#else
if (posix_memalign(&ptr, alignment, size)) ptr = NULL;
#endif
- if (ptr == NULL) FatalProcessOutOfMemory("AlignedAlloc");
+ if (ptr == NULL) V8::FatalProcessOutOfMemory("AlignedAlloc");
return ptr;
}
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 2fea7b2826..292e1fe23b 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -21,7 +21,6 @@ class Malloced {
void* operator new(size_t size) { return New(size); }
void operator delete(void* p) { Delete(p); }
- static void FatalProcessOutOfMemory();
static void* New(size_t size);
static void Delete(void* p);
};
@@ -59,7 +58,7 @@ class AllStatic {
template <typename T>
T* NewArray(size_t size) {
T* result = new T[size];
- if (result == NULL) Malloced::FatalProcessOutOfMemory();
+ if (result == NULL) FatalProcessOutOfMemory("NewArray");
return result;
}
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
new file mode 100644
index 0000000000..ed434caa00
--- /dev/null
+++ b/deps/v8/src/api-natives.cc
@@ -0,0 +1,588 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-natives.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
+ Handle<ObjectTemplateInfo> data);
+
+
+MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
+ Handle<FunctionTemplateInfo> data,
+ Handle<Name> name = Handle<Name>());
+
+
+MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
+ Handle<Name> name = Handle<Name>()) {
+ if (data->IsFunctionTemplateInfo()) {
+ return InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(data), name);
+ } else if (data->IsObjectTemplateInfo()) {
+ return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data));
+ } else {
+ return data;
+ }
+}
+
+
+MaybeHandle<Object> DefineAccessorProperty(
+ Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
+ Handle<Object> getter, Handle<Object> setter, Smi* attributes) {
+ DCHECK(PropertyDetails::AttributesField::is_valid(
+ static_cast<PropertyAttributes>(attributes->value())));
+ if (!getter->IsUndefined()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, getter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(getter)),
+ Object);
+ }
+ if (!setter->IsUndefined()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, setter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(setter)),
+ Object);
+ }
+ RETURN_ON_EXCEPTION(isolate,
+ JSObject::DefineAccessor(
+ object, name, getter, setter,
+ static_cast<PropertyAttributes>(attributes->value())),
+ Object);
+ return object;
+}
+
+
+MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Name> key,
+ Handle<Object> prop_data,
+ Smi* unchecked_attributes) {
+ DCHECK((unchecked_attributes->value() &
+ ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+ // Compute attributes.
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(unchecked_attributes->value());
+
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ Instantiate(isolate, prop_data, key), Object);
+
+#ifdef DEBUG
+ bool duplicate;
+ if (key->IsName()) {
+ LookupIterator it(object, Handle<Name>::cast(key),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+ DCHECK(maybe.has_value);
+ duplicate = it.IsFound();
+ } else {
+ uint32_t index = 0;
+ key->ToArrayIndex(&index);
+ Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
+ if (!maybe.has_value) return MaybeHandle<Object>();
+ duplicate = maybe.value;
+ }
+ if (duplicate) {
+ Handle<Object> args[1] = {key};
+ THROW_NEW_ERROR(isolate, NewTypeError("duplicate_template_property",
+ HandleVector(args, 1)),
+ Object);
+ }
+#endif
+
+ RETURN_ON_EXCEPTION(
+ isolate, Runtime::DefineObjectProperty(object, key, value, attributes),
+ Object);
+ return object;
+}
+
+
+void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
+ Handle<Map> old_map(object->map());
+  // Copy the map so it won't interfere with the constructor's initial map.
+ Handle<Map> new_map = Map::Copy(old_map, "DisableAccessChecks");
+ new_map->set_is_access_check_needed(false);
+ JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
+}
+
+
+void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
+ Handle<Map> old_map(object->map());
+  // Copy the map so it won't interfere with the constructor's initial map.
+ Handle<Map> new_map = Map::Copy(old_map, "EnableAccessChecks");
+ new_map->set_is_access_check_needed(true);
+ JSObject::MigrateToMap(object, new_map);
+}
+
+
+class AccessCheckDisableScope {
+ public:
+ AccessCheckDisableScope(Isolate* isolate, Handle<JSObject> obj)
+ : isolate_(isolate),
+ disabled_(obj->map()->is_access_check_needed()),
+ obj_(obj) {
+ if (disabled_) {
+ DisableAccessChecks(isolate_, obj_);
+ }
+ }
+ ~AccessCheckDisableScope() {
+ if (disabled_) {
+ EnableAccessChecks(isolate_, obj_);
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ const bool disabled_;
+ Handle<JSObject> obj_;
+};
+
+
+MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
+ Handle<TemplateInfo> data) {
+ auto property_list = handle(data->property_list(), isolate);
+ if (property_list->IsUndefined()) return obj;
+ // TODO(dcarney): just use a FixedArray here.
+ NeanderArray properties(property_list);
+ if (properties.length() == 0) return obj;
+ HandleScope scope(isolate);
+ // Disable access checks while instantiating the object.
+ AccessCheckDisableScope access_check_scope(isolate, obj);
+ for (int i = 0; i < properties.length();) {
+ int length = Smi::cast(properties.get(i))->value();
+ if (length == 3) {
+ auto name = handle(Name::cast(properties.get(i + 1)), isolate);
+ auto prop_data = handle(properties.get(i + 2), isolate);
+ auto attributes = Smi::cast(properties.get(i + 3));
+ RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
+ prop_data, attributes),
+ JSObject);
+ } else {
+ DCHECK(length == 4);
+ auto name = handle(Name::cast(properties.get(i + 1)), isolate);
+ auto getter = handle(properties.get(i + 2), isolate);
+ auto setter = handle(properties.get(i + 3), isolate);
+ auto attributes = Smi::cast(properties.get(i + 4));
+ RETURN_ON_EXCEPTION(isolate,
+ DefineAccessorProperty(isolate, obj, name, getter,
+ setter, attributes),
+ JSObject);
+ }
+ i += length + 1;
+ }
+ return obj;
+}
+
+
+MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
+ Handle<ObjectTemplateInfo> data) {
+ // Enter a new scope. Recursion could otherwise create a lot of handles.
+ HandleScope scope(isolate);
+ // Fast path.
+ Handle<JSObject> result;
+ auto info = Handle<ObjectTemplateInfo>::cast(data);
+ auto constructor = handle(info->constructor(), isolate);
+ Handle<JSFunction> cons;
+ if (constructor->IsUndefined()) {
+ cons = isolate->object_function();
+ } else {
+ auto cons_templ = Handle<FunctionTemplateInfo>::cast(constructor);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
+ }
+ auto object = isolate->factory()->NewJSObject(cons);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, ConfigureInstance(isolate, object, info), JSFunction);
+ // TODO(dcarney): is this necessary?
+ JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
+ return scope.CloseAndEscape(result);
+}
+
+
+void InstallInCache(Isolate* isolate, int serial_number,
+ Handle<JSFunction> function) {
+ auto cache = isolate->function_cache();
+ if (cache->length() <= serial_number) {
+ int new_size;
+ if (isolate->next_serial_number() < 50) {
+ new_size = 100;
+ } else {
+ new_size = 3 * isolate->next_serial_number() / 2;
+ }
+ cache = FixedArray::CopySize(cache, new_size);
+ isolate->native_context()->set_function_cache(*cache);
+ }
+ cache->set(serial_number, *function);
+}
+
+
+MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
+ Handle<FunctionTemplateInfo> data,
+ Handle<Name> name) {
+ int serial_number = Smi::cast(data->serial_number())->value();
+ // Probe cache.
+ if (!data->do_not_cache()) {
+ auto cache = isolate->function_cache();
+ // Fast case: see if the function has already been instantiated
+ if (serial_number < cache->length()) {
+ Handle<Object> element = FixedArray::get(cache, serial_number);
+ if (element->IsJSFunction()) {
+ return Handle<JSFunction>::cast(element);
+ }
+ }
+ }
+ // Enter a new scope. Recursion could otherwise create a lot of handles.
+ HandleScope scope(isolate);
+ Handle<JSObject> prototype;
+ if (!data->remove_prototype()) {
+ auto prototype_templ = handle(data->prototype_template(), isolate);
+ if (prototype_templ->IsUndefined()) {
+ prototype = isolate->factory()->NewJSObject(isolate->object_function());
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prototype,
+ InstantiateObject(isolate,
+ Handle<ObjectTemplateInfo>::cast(prototype_templ)),
+ JSFunction);
+ }
+ auto parent = handle(data->parent_template(), isolate);
+ if (!parent->IsUndefined()) {
+ Handle<JSFunction> parent_instance;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, parent_instance,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(parent)),
+ JSFunction);
+ // TODO(dcarney): decide what to do here.
+ Handle<Object> parent_prototype;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, parent_prototype,
+ JSObject::GetProperty(parent_instance,
+ isolate->factory()->prototype_string()),
+ JSFunction);
+ RETURN_ON_EXCEPTION(
+ isolate, JSObject::SetPrototype(prototype, parent_prototype, false),
+ JSFunction);
+ }
+ }
+ auto function = ApiNatives::CreateApiFunction(
+ isolate, data, prototype, ApiNatives::JavaScriptObjectType);
+ if (!name.is_null() && name->IsString()) {
+ function->shared()->set_name(*name);
+ }
+ if (!data->do_not_cache()) {
+ // Cache the function to limit recursion.
+ InstallInCache(isolate, serial_number, function);
+ }
+ auto result = ConfigureInstance(isolate, function, data);
+ if (result.is_null()) {
+ // uncache on error.
+ if (!data->do_not_cache()) {
+ auto cache = isolate->function_cache();
+ cache->set(serial_number, isolate->heap()->undefined_value());
+ }
+ return MaybeHandle<JSFunction>();
+ }
+ return scope.CloseAndEscape(function);
+}
+
+
+class InvokeScope {
+ public:
+ explicit InvokeScope(Isolate* isolate)
+ : isolate_(isolate), save_context_(isolate) {}
+ ~InvokeScope() {
+ bool has_exception = isolate_->has_pending_exception();
+ if (has_exception) {
+ isolate_->ReportPendingMessages();
+ } else {
+ isolate_->clear_pending_message();
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ SaveContext save_context_;
+};
+
+
+void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
+ int length, Handle<Object>* data) {
+ auto list = handle(templ->property_list(), isolate);
+ if (list->IsUndefined()) {
+ list = NeanderArray(isolate).value();
+ templ->set_property_list(*list);
+ }
+ NeanderArray array(list);
+ array.add(isolate, isolate->factory()->NewNumberFromInt(length));
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value =
+ data[i].is_null()
+ ? Handle<Object>::cast(isolate->factory()->undefined_value())
+ : data[i];
+ array.add(isolate, value);
+ }
+}
+
+} // namespace
+
+
+MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
+ Handle<FunctionTemplateInfo> data) {
+ Isolate* isolate = data->GetIsolate();
+ InvokeScope invoke_scope(isolate);
+ return ::v8::internal::InstantiateFunction(isolate, data);
+}
+
+
+MaybeHandle<JSObject> ApiNatives::InstantiateObject(
+ Handle<ObjectTemplateInfo> data) {
+ Isolate* isolate = data->GetIsolate();
+ InvokeScope invoke_scope(isolate);
+ return ::v8::internal::InstantiateObject(isolate, data);
+}
+
+
+MaybeHandle<FunctionTemplateInfo> ApiNatives::ConfigureInstance(
+ Isolate* isolate, Handle<FunctionTemplateInfo> desc,
+ Handle<JSObject> instance) {
+ // Configure the instance by adding the properties specified by the
+ // instance template.
+ if (desc->instance_template()->IsUndefined()) return desc;
+ InvokeScope invoke_scope(isolate);
+ Handle<ObjectTemplateInfo> instance_template(
+ ObjectTemplateInfo::cast(desc->instance_template()), isolate);
+ RETURN_ON_EXCEPTION(isolate, ::v8::internal::ConfigureInstance(
+ isolate, instance, instance_template),
+ FunctionTemplateInfo);
+ return desc;
+}
+
+
+void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes) {
+ const int kSize = 3;
+ DCHECK(Smi::IsValid(static_cast<int>(attributes)));
+ auto attribute_handle =
+ handle(Smi::FromInt(static_cast<int>(attributes)), isolate);
+ Handle<Object> data[kSize] = {name, value, attribute_handle};
+ AddPropertyToPropertyList(isolate, info, kSize, data);
+}
+
+
+void ApiNatives::AddAccessorProperty(Isolate* isolate,
+ Handle<TemplateInfo> info,
+ Handle<Name> name,
+ Handle<FunctionTemplateInfo> getter,
+ Handle<FunctionTemplateInfo> setter,
+ PropertyAttributes attributes) {
+ const int kSize = 4;
+ DCHECK(Smi::IsValid(static_cast<int>(attributes)));
+ auto attribute_handle =
+ handle(Smi::FromInt(static_cast<int>(attributes)), isolate);
+ Handle<Object> data[kSize] = {name, getter, setter, attribute_handle};
+ AddPropertyToPropertyList(isolate, info, kSize, data);
+}
+
+
+void ApiNatives::AddNativeDataProperty(Isolate* isolate,
+ Handle<TemplateInfo> info,
+ Handle<AccessorInfo> property) {
+ auto list = handle(info->property_accessors(), isolate);
+ if (list->IsUndefined()) {
+ list = NeanderArray(isolate).value();
+ info->set_property_accessors(*list);
+ }
+ NeanderArray array(list);
+ array.add(isolate, property);
+}
+
+
+Handle<JSFunction> ApiNatives::CreateApiFunction(
+ Isolate* isolate, Handle<FunctionTemplateInfo> obj,
+ Handle<Object> prototype, ApiInstanceType instance_type) {
+ Handle<Code> code = isolate->builtins()->HandleApiCall();
+ Handle<Code> construct_stub = isolate->builtins()->JSConstructStubApi();
+
+ obj->set_instantiated(true);
+ Handle<JSFunction> result;
+ if (obj->remove_prototype()) {
+ result = isolate->factory()->NewFunctionWithoutPrototype(
+ isolate->factory()->empty_string(), code);
+ } else {
+ int internal_field_count = 0;
+ if (!obj->instance_template()->IsUndefined()) {
+ Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->instance_template()));
+ internal_field_count =
+ Smi::cast(instance_template->internal_field_count())->value();
+ }
+
+ // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
+ // JSObject::GetHeaderSize.
+ int instance_size = kPointerSize * internal_field_count;
+ InstanceType type;
+ switch (instance_type) {
+ case JavaScriptObjectType:
+ type = JS_OBJECT_TYPE;
+ instance_size += JSObject::kHeaderSize;
+ break;
+ case GlobalObjectType:
+ type = JS_GLOBAL_OBJECT_TYPE;
+ instance_size += JSGlobalObject::kSize;
+ break;
+ case GlobalProxyType:
+ type = JS_GLOBAL_PROXY_TYPE;
+ instance_size += JSGlobalProxy::kSize;
+ break;
+ default:
+ UNREACHABLE();
+ type = JS_OBJECT_TYPE; // Keep the compiler happy.
+ break;
+ }
+
+ result = isolate->factory()->NewFunction(
+ isolate->factory()->empty_string(), code, prototype, type,
+ instance_size, obj->read_only_prototype(), true);
+ }
+
+ result->shared()->set_length(obj->length());
+ Handle<Object> class_name(obj->class_name(), isolate);
+ if (class_name->IsString()) {
+ result->shared()->set_instance_class_name(*class_name);
+ result->shared()->set_name(*class_name);
+ }
+ result->shared()->set_function_data(*obj);
+ result->shared()->set_construct_stub(*construct_stub);
+ result->shared()->DontAdaptArguments();
+
+ if (obj->remove_prototype()) {
+ DCHECK(result->shared()->IsApiFunction());
+ DCHECK(!result->has_initial_map());
+ DCHECK(!result->has_prototype());
+ return result;
+ }
+
+#ifdef DEBUG
+ LookupIterator it(handle(JSObject::cast(result->prototype())),
+ isolate->factory()->constructor_string(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
+ DCHECK(it.IsFound());
+ DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
+#endif
+
+ // Down from here is only valid for API functions that can be used as a
+ // constructor (don't set the "remove prototype" flag).
+
+ Handle<Map> map(result->initial_map());
+
+ // Mark as undetectable if needed.
+ if (obj->undetectable()) {
+ map->set_is_undetectable();
+ }
+
+ // Mark as hidden for the __proto__ accessor if needed.
+ if (obj->hidden_prototype()) {
+ map->set_is_hidden_prototype();
+ }
+
+ // Mark as needs_access_check if needed.
+ if (obj->needs_access_check()) {
+ map->set_is_access_check_needed(true);
+ }
+
+ // Set interceptor information in the map.
+ if (!obj->named_property_handler()->IsUndefined()) {
+ map->set_has_named_interceptor();
+ }
+ if (!obj->indexed_property_handler()->IsUndefined()) {
+ map->set_has_indexed_interceptor();
+ }
+
+ // Set instance call-as-function information in the map.
+ if (!obj->instance_call_handler()->IsUndefined()) {
+ map->set_has_instance_call_handler();
+ }
+
+ // Recursively copy parent instance templates' accessors,
+ // 'data' may be modified.
+ int max_number_of_additional_properties = 0;
+ int max_number_of_static_properties = 0;
+ FunctionTemplateInfo* info = *obj;
+ while (true) {
+ if (!info->instance_template()->IsUndefined()) {
+ Object* props = ObjectTemplateInfo::cast(info->instance_template())
+ ->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate);
+ NeanderArray props_array(props_handle);
+ max_number_of_additional_properties += props_array.length();
+ }
+ }
+ if (!info->property_accessors()->IsUndefined()) {
+ Object* props = info->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate);
+ NeanderArray props_array(props_handle);
+ max_number_of_static_properties += props_array.length();
+ }
+ }
+ Object* parent = info->parent_template();
+ if (parent->IsUndefined()) break;
+ info = FunctionTemplateInfo::cast(parent);
+ }
+
+ Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
+
+  // Use a temporary FixedArray to accumulate static accessors
+ int valid_descriptors = 0;
+ Handle<FixedArray> array;
+ if (max_number_of_static_properties > 0) {
+ array = isolate->factory()->NewFixedArray(max_number_of_static_properties);
+ }
+
+ while (true) {
+ // Install instance descriptors
+ if (!obj->instance_template()->IsUndefined()) {
+ Handle<ObjectTemplateInfo> instance = Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->instance_template()), isolate);
+ Handle<Object> props =
+ Handle<Object>(instance->property_accessors(), isolate);
+ if (!props->IsUndefined()) {
+ Map::AppendCallbackDescriptors(map, props);
+ }
+ }
+ // Accumulate static accessors
+ if (!obj->property_accessors()->IsUndefined()) {
+ Handle<Object> props = Handle<Object>(obj->property_accessors(), isolate);
+ valid_descriptors =
+ AccessorInfo::AppendUnique(props, array, valid_descriptors);
+ }
+ // Climb parent chain
+ Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate);
+ if (parent->IsUndefined()) break;
+ obj = Handle<FunctionTemplateInfo>::cast(parent);
+ }
+
+ // Install accumulated static accessors
+ for (int i = 0; i < valid_descriptors; i++) {
+ Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
+ JSObject::SetAccessor(result, accessor).Assert();
+ }
+
+ DCHECK(result->shared()->IsApiFunction());
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
new file mode 100644
index 0000000000..9f97b5d018
--- /dev/null
+++ b/deps/v8/src/api-natives.h
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_API_NATIVES_H_
+#define V8_API_NATIVES_H_
+
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class ApiNatives {
+ public:
+ MUST_USE_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
+ Handle<FunctionTemplateInfo> data);
+
+ MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
+ Handle<ObjectTemplateInfo> data);
+
+ MUST_USE_RESULT static MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
+ Isolate* isolate, Handle<FunctionTemplateInfo> instance,
+ Handle<JSObject> data);
+
+ enum ApiInstanceType {
+ JavaScriptObjectType,
+ GlobalObjectType,
+ GlobalProxyType
+ };
+
+ static Handle<JSFunction> CreateApiFunction(Isolate* isolate,
+ Handle<FunctionTemplateInfo> obj,
+ Handle<Object> prototype,
+ ApiInstanceType instance_type);
+
+ static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static void AddAccessorProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<Name> name,
+ Handle<FunctionTemplateInfo> getter,
+ Handle<FunctionTemplateInfo> setter,
+ PropertyAttributes attributes);
+
+ static void AddNativeDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<AccessorInfo> property);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index c3f84534b4..4d07f8216e 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -12,6 +12,7 @@
#include "include/v8-debug.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
+#include "src/api-natives.h"
#include "src/assert-scope.h"
#include "src/background-parsing-task.h"
#include "src/base/functional.h"
@@ -206,7 +207,21 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
}
-StartupData V8::CreateSnapshotDataBlob() {
+bool RunExtraCode(Isolate* isolate, char* utf8_source) {
+ // Run custom script if provided.
+ TryCatch try_catch;
+ Local<String> source_string = String::NewFromUtf8(isolate, utf8_source);
+ if (try_catch.HasCaught()) return false;
+ ScriptOrigin origin(String::NewFromUtf8(isolate, "<embedded script>"));
+ ScriptCompiler::Source source(source_string, origin);
+ Local<Script> script = ScriptCompiler::Compile(isolate, &source);
+ if (try_catch.HasCaught()) return false;
+ script->Run();
+ return !try_catch.HasCaught();
+}
+
+
+StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
Isolate::CreateParams params;
params.enable_serializer = true;
Isolate* isolate = v8::Isolate::New(params);
@@ -215,9 +230,16 @@ StartupData V8::CreateSnapshotDataBlob() {
Isolate::Scope isolate_scope(isolate);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
Persistent<Context> context;
+ i::Snapshot::Metadata metadata;
{
HandleScope handle_scope(isolate);
- context.Reset(isolate, Context::New(isolate));
+ Handle<Context> new_context = Context::New(isolate);
+ context.Reset(isolate, new_context);
+ if (custom_source != NULL) {
+ metadata.set_embeds_script(true);
+ Context::Scope context_scope(new_context);
+ if (!RunExtraCode(isolate, custom_source)) context.Reset();
+ }
}
if (!context.IsEmpty()) {
// Make sure all builtin scripts are cached.
@@ -242,10 +264,7 @@ StartupData V8::CreateSnapshotDataBlob() {
context_ser.Serialize(&raw_context);
ser.SerializeWeakReferences();
- i::SnapshotData sd(snapshot_sink, ser);
- i::SnapshotData csd(context_sink, context_ser);
-
- result = i::Snapshot::CreateSnapshotBlob(sd.RawData(), csd.RawData());
+ result = i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
}
}
isolate->Dispose();
@@ -387,7 +406,9 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
LOG_API(isolate, "Persistent::New");
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
- (*obj)->ObjectVerify();
+ if (i::FLAG_verify_heap) {
+ (*obj)->ObjectVerify();
+ }
#endif // VERIFY_HEAP
return result.location();
}
@@ -396,7 +417,9 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
i::Object** V8::CopyPersistent(i::Object** obj) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
#ifdef VERIFY_HEAP
- (*obj)->ObjectVerify();
+ if (i::FLAG_verify_heap) {
+ (*obj)->ObjectVerify();
+ }
#endif // VERIFY_HEAP
return result.location();
}
@@ -409,17 +432,20 @@ void V8::MakeWeak(i::Object** object, void* parameter,
void V8::MakePhantom(i::Object** object, void* parameter,
+ int internal_field_index1, int internal_field_index2,
PhantomCallbackData<void>::Callback weak_callback) {
- i::GlobalHandles::MakePhantom(object, parameter, weak_callback);
-}
-
-
-void V8::MakePhantom(
- i::Object** object,
- InternalFieldsCallbackData<void, void>::Callback weak_callback,
- int internal_field_index1, int internal_field_index2) {
- i::GlobalHandles::MakePhantom(object, weak_callback, internal_field_index1,
- internal_field_index2);
+ if (internal_field_index1 == 0) {
+ if (internal_field_index2 == 1) {
+ i::GlobalHandles::MakePhantom(object, parameter, 2, weak_callback);
+ } else {
+ DCHECK_EQ(internal_field_index2, kNoInternalFieldIndex);
+ i::GlobalHandles::MakePhantom(object, parameter, 1, weak_callback);
+ }
+ } else {
+ DCHECK_EQ(internal_field_index1, kNoInternalFieldIndex);
+ DCHECK_EQ(internal_field_index2, kNoInternalFieldIndex);
+ i::GlobalHandles::MakePhantom(object, parameter, 0, weak_callback);
+ }
}
@@ -706,39 +732,17 @@ static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
}
-static void TemplateSet(i::Isolate* isolate,
- v8::Template* templ,
- int length,
- v8::Handle<v8::Data>* data) {
- i::Handle<i::Object> list(Utils::OpenHandle(templ)->property_list(), isolate);
- if (list->IsUndefined()) {
- list = NeanderArray(isolate).value();
- Utils::OpenHandle(templ)->set_property_list(*list);
- }
- NeanderArray array(list);
- array.add(isolate, isolate->factory()->NewNumberFromInt(length));
- for (int i = 0; i < length; i++) {
- i::Handle<i::Object> value = data[i].IsEmpty() ?
- i::Handle<i::Object>(isolate->factory()->undefined_value()) :
- Utils::OpenHandle(*data[i]);
- array.add(isolate, value);
- }
-}
-
-
void Template::Set(v8::Handle<Name> name,
v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto templ = Utils::OpenHandle(this);
+ i::Isolate* isolate = templ->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- const int kSize = 3;
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8::Handle<v8::Data> data[kSize] = {
- name,
- value,
- v8::Integer::New(v8_isolate, attribute)};
- TemplateSet(isolate, this, kSize, data);
+ // TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
+ i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
+ Utils::OpenHandle(*value),
+ static_cast<PropertyAttributes>(attribute));
}
@@ -750,19 +754,16 @@ void Template::SetAccessorProperty(
v8::AccessControl access_control) {
// TODO(verwaest): Remove |access_control|.
DCHECK_EQ(v8::DEFAULT, access_control);
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto templ = Utils::OpenHandle(this);
+ auto isolate = templ->GetIsolate();
ENTER_V8(isolate);
DCHECK(!name.IsEmpty());
DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
i::HandleScope scope(isolate);
- const int kSize = 5;
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- v8::Handle<v8::Data> data[kSize] = {
- name,
- getter,
- setter,
- v8::Integer::New(v8_isolate, attribute)};
- TemplateSet(isolate, this, kSize, data);
+ i::ApiNatives::AddAccessorProperty(
+ isolate, templ, Utils::OpenHandle(*name),
+ Utils::OpenHandle(*getter, true), Utils::OpenHandle(*setter, true),
+ static_cast<PropertyAttributes>(attribute));
}
@@ -788,10 +789,19 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
}
+static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
+ const char* func) {
+ Utils::ApiCheck(!info->instantiated(), func,
+ "FunctionTemplate already instantiated");
+}
+
+
void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::Inherit");
+ i::Isolate* isolate = info->GetIsolate();
ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
+ info->set_parent_template(*Utils::OpenHandle(*value));
}
@@ -846,25 +856,8 @@ Local<FunctionTemplate> FunctionTemplate::New(
Local<Signature> Signature::New(Isolate* isolate,
- Handle<FunctionTemplate> receiver, int argc,
- Handle<FunctionTemplate> argv[]) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Signature::New");
- ENTER_V8(i_isolate);
- i::Handle<i::Struct> struct_obj =
- i_isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
- i::Handle<i::SignatureInfo> obj =
- i::Handle<i::SignatureInfo>::cast(struct_obj);
- if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
- if (argc > 0) {
- i::Handle<i::FixedArray> args = i_isolate->factory()->NewFixedArray(argc);
- for (int i = 0; i < argc; i++) {
- if (!argv[i].IsEmpty())
- args->set(i, *Utils::OpenHandle(*argv[i]));
- }
- obj->set_args(*args);
- }
- return Utils::ToLocal(obj);
+ Handle<FunctionTemplate> receiver) {
+ return Utils::SignatureToLocal(Utils::OpenHandle(*receiver));
}
@@ -875,122 +868,6 @@ Local<AccessorSignature> AccessorSignature::New(
}
-template<typename Operation>
-static Local<Operation> NewDescriptor(
- Isolate* isolate,
- const i::DeclaredAccessorDescriptorData& data,
- Data* previous_descriptor) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::DeclaredAccessorDescriptor> previous =
- i::Handle<i::DeclaredAccessorDescriptor>();
- if (previous_descriptor != NULL) {
- previous = Utils::OpenHandle(
- static_cast<DeclaredAccessorDescriptor*>(previous_descriptor));
- }
- i::Handle<i::DeclaredAccessorDescriptor> descriptor =
- i::DeclaredAccessorDescriptor::Create(internal_isolate, data, previous);
- return Utils::Convert<i::DeclaredAccessorDescriptor, Operation>(descriptor);
-}
-
-
-Local<RawOperationDescriptor>
-ObjectOperationDescriptor::NewInternalFieldDereference(
- Isolate* isolate,
- int internal_field) {
- i::DeclaredAccessorDescriptorData data;
- data.type = i::kDescriptorObjectDereference;
- data.object_dereference_descriptor.internal_field = internal_field;
- return NewDescriptor<RawOperationDescriptor>(isolate, data, NULL);
-}
-
-
-Local<RawOperationDescriptor> RawOperationDescriptor::NewRawShift(
- Isolate* isolate,
- int16_t byte_offset) {
- i::DeclaredAccessorDescriptorData data;
- data.type = i::kDescriptorPointerShift;
- data.pointer_shift_descriptor.byte_offset = byte_offset;
- return NewDescriptor<RawOperationDescriptor>(isolate, data, this);
-}
-
-
-Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewHandleDereference(
- Isolate* isolate) {
- i::DeclaredAccessorDescriptorData data;
- data.type = i::kDescriptorReturnObject;
- return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this);
-}
-
-
-Local<RawOperationDescriptor> RawOperationDescriptor::NewRawDereference(
- Isolate* isolate) {
- i::DeclaredAccessorDescriptorData data;
- data.type = i::kDescriptorPointerDereference;
- return NewDescriptor<RawOperationDescriptor>(isolate, data, this);
-}
-
-
-Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewPointerCompare(
- Isolate* isolate,
- void* compare_value) {
- i::DeclaredAccessorDescriptorData data;
- data.type = i::kDescriptorPointerCompare;
- data.pointer_compare_descriptor.compare_value = compare_value;
- return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this);
-}
-
-
-Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewPrimitiveValue(
- Isolate* isolate,
- DeclaredAccessorDescriptorDataType data_type,
- uint8_t bool_offset) {
- i::DeclaredAccessorDescriptorData data;
- data.type = i::kDescriptorPrimitiveValue;
- data.primitive_value_descriptor.data_type = data_type;
- data.primitive_value_descriptor.bool_offset = bool_offset;
- return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this);
-}
-
-
-template<typename T>
-static Local<DeclaredAccessorDescriptor> NewBitmaskCompare(
- Isolate* isolate,
- T bitmask,
- T compare_value,
- RawOperationDescriptor* operation) {
- i::DeclaredAccessorDescriptorData data;
- data.type = i::kDescriptorBitmaskCompare;
- data.bitmask_compare_descriptor.bitmask = bitmask;
- data.bitmask_compare_descriptor.compare_value = compare_value;
- data.bitmask_compare_descriptor.size = sizeof(T);
- return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, operation);
-}
-
-
-Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare8(
- Isolate* isolate,
- uint8_t bitmask,
- uint8_t compare_value) {
- return NewBitmaskCompare(isolate, bitmask, compare_value, this);
-}
-
-
-Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare16(
- Isolate* isolate,
- uint16_t bitmask,
- uint16_t compare_value) {
- return NewBitmaskCompare(isolate, bitmask, compare_value, this);
-}
-
-
-Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare32(
- Isolate* isolate,
- uint32_t bitmask,
- uint32_t compare_value) {
- return NewBitmaskCompare(isolate, bitmask, compare_value, this);
-}
-
-
Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
Handle<FunctionTemplate> types[1] = { type };
return TypeSwitch::New(1, types);
@@ -1034,7 +911,9 @@ int TypeSwitch::match(v8::Handle<Value> value) {
void FunctionTemplate::SetCallHandler(FunctionCallback callback,
v8::Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
+ i::Isolate* isolate = info->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
@@ -1046,7 +925,7 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(this)->set_call_code(*obj);
+ info->set_call_code(*obj);
}
@@ -1089,23 +968,6 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
}
-static i::Handle<i::AccessorInfo> MakeAccessorInfo(
- v8::Handle<Name> name,
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
- void* setter_ignored,
- void* data_ignored,
- v8::AccessControl settings,
- v8::PropertyAttribute attributes,
- v8::Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
- if (descriptor.IsEmpty()) return i::Handle<i::DeclaredAccessorInfo>();
- i::Handle<i::DeclaredAccessorInfo> obj =
- isolate->factory()->NewDeclaredAccessorInfo();
- obj->set_descriptor(*Utils::OpenHandle(*descriptor));
- return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
-}
-
-
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this, true);
if (!Utils::ApiCheck(!handle.is_null(),
@@ -1127,37 +989,47 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
void FunctionTemplate::SetLength(int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::SetLength");
+ auto isolate = info->GetIsolate();
ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_length(length);
+ info->set_length(length);
}
void FunctionTemplate::SetClassName(Handle<String> name) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::SetClassName");
+ auto isolate = info->GetIsolate();
ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
+ info->set_class_name(*Utils::OpenHandle(*name));
}
void FunctionTemplate::SetHiddenPrototype(bool value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::SetHiddenPrototype");
+ auto isolate = info->GetIsolate();
ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_hidden_prototype(value);
+ info->set_hidden_prototype(value);
}
void FunctionTemplate::ReadOnlyPrototype() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::ReadOnlyPrototype");
+ auto isolate = info->GetIsolate();
ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_read_only_prototype(true);
+ info->set_read_only_prototype(true);
}
void FunctionTemplate::RemovePrototype() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::RemovePrototype");
+ auto isolate = info->GetIsolate();
ENTER_V8(isolate);
- Utils::OpenHandle(this)->set_remove_prototype(true);
+ info->set_remove_prototype(true);
}
@@ -1213,20 +1085,6 @@ static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
}
-static inline void AddPropertyToTemplate(
- i::Handle<i::TemplateInfo> info,
- i::Handle<i::AccessorInfo> obj) {
- i::Isolate* isolate = info->GetIsolate();
- i::Handle<i::Object> list(info->property_accessors(), isolate);
- if (list->IsUndefined()) {
- list = NeanderArray(isolate).value();
- info->set_property_accessors(*list);
- }
- NeanderArray array(list);
- array.add(isolate, obj);
-}
-
-
static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
i::Isolate* isolate,
Template* template_obj) {
@@ -1253,30 +1111,18 @@ static bool TemplateSetAccessor(
AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(template_obj)->GetIsolate();
+ auto isolate = Utils::OpenHandle(template_obj)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
- name, getter, setter, data, settings, attribute, signature);
+ auto obj = MakeAccessorInfo(name, getter, setter, data, settings, attribute,
+ signature);
if (obj.is_null()) return false;
- i::Handle<i::TemplateInfo> info = GetTemplateInfo(isolate, template_obj);
- AddPropertyToTemplate(info, obj);
+ auto info = GetTemplateInfo(isolate, template_obj);
+ i::ApiNatives::AddNativeDataProperty(isolate, info, obj);
return true;
}
-bool Template::SetDeclaredAccessor(
- Local<Name> name,
- Local<DeclaredAccessorDescriptor> descriptor,
- PropertyAttribute attribute,
- Local<AccessorSignature> signature,
- AccessControl settings) {
- void* null = NULL;
- return TemplateSetAccessor(
- this, name, descriptor, null, null, settings, attribute, signature);
-}
-
-
void Template::SetNativeDataProperty(v8::Local<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
@@ -1327,23 +1173,17 @@ void ObjectTemplate::SetAccessor(v8::Handle<Name> name,
template <typename Getter, typename Setter, typename Query, typename Deleter,
typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
- Getter getter, Setter setter,
- Query query, Deleter remover,
- Enumerator enumerator,
- Handle<Value> data,
- bool can_intercept_symbols) {
+static void ObjectTemplateSetNamedPropertyHandler(
+ ObjectTemplate* templ, Getter getter, Setter setter, Query query,
+ Deleter remover, Enumerator enumerator, Handle<Value> data,
+ bool can_intercept_symbols, PropertyHandlerFlags flags) {
i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(isolate, templ);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(templ)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ auto cons = EnsureConstructor(isolate, templ);
+ EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
+ auto obj = i::Handle<i::InterceptorInfo>::cast(
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
@@ -1352,6 +1192,8 @@ static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
obj->set_flags(0);
obj->set_can_intercept_symbols(can_intercept_symbols);
+ obj->set_all_can_read(static_cast<int>(flags) &
+ static_cast<int>(PropertyHandlerFlags::kAllCanRead));
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1366,15 +1208,16 @@ void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
NamedPropertyEnumeratorCallback enumerator, Handle<Value> data) {
ObjectTemplateSetNamedPropertyHandler(this, getter, setter, query, remover,
- enumerator, data, false);
+ enumerator, data, false,
+ PropertyHandlerFlags::kNone);
}
void ObjectTemplate::SetHandler(
const NamedPropertyHandlerConfiguration& config) {
- ObjectTemplateSetNamedPropertyHandler(this, config.getter, config.setter,
- config.query, config.deleter,
- config.enumerator, config.data, true);
+ ObjectTemplateSetNamedPropertyHandler(
+ this, config.getter, config.setter, config.query, config.deleter,
+ config.enumerator, config.data, true, config.flags);
}
@@ -1382,10 +1225,8 @@ void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(isolate, this);
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ auto cons = EnsureConstructor(isolate, this);
+ EnsureNotInstantiated(cons, "v8::ObjectTemplate::MarkAsUndetectable");
cons->set_undetectable(true);
}
@@ -1398,7 +1239,8 @@ void ObjectTemplate::SetAccessCheckCallbacks(
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(isolate, this);
+ auto cons = EnsureConstructor(isolate, this);
+ EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallbacks");
i::Handle<i::Struct> struct_info =
isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
@@ -1413,9 +1255,6 @@ void ObjectTemplate::SetAccessCheckCallbacks(
}
info->set_data(*Utils::OpenHandle(*data));
- i::FunctionTemplateInfo* constructor =
- i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
cons->set_access_check_info(*info);
cons->set_needs_access_check(turned_on_by_default);
}
@@ -1426,14 +1265,10 @@ void ObjectTemplate::SetHandler(
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(isolate, this);
- i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ auto cons = EnsureConstructor(isolate, this);
+ EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
+ auto obj = i::Handle<i::InterceptorInfo>::cast(
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
if (config.getter != 0) SET_FIELD_WRAPPED(obj, set_getter, config.getter);
if (config.setter != 0) SET_FIELD_WRAPPED(obj, set_setter, config.setter);
@@ -1443,6 +1278,8 @@ void ObjectTemplate::SetHandler(
SET_FIELD_WRAPPED(obj, set_enumerator, config.enumerator);
}
obj->set_flags(0);
+ obj->set_all_can_read(static_cast<int>(config.flags) &
+ static_cast<int>(PropertyHandlerFlags::kAllCanRead));
v8::Local<v8::Value> data = config.data;
if (data.IsEmpty()) {
@@ -1458,10 +1295,8 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(isolate, this);
- i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(this)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
+ auto cons = EnsureConstructor(isolate, this);
+ EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
@@ -1633,6 +1468,7 @@ Local<Value> Script::Run() {
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
+ i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
@@ -1653,10 +1489,9 @@ Local<UnboundScript> Script::GetUnboundScript() {
}
-Local<UnboundScript> ScriptCompiler::CompileUnbound(
- Isolate* v8_isolate,
- Source* source,
- CompileOptions options) {
+Local<UnboundScript> ScriptCompiler::CompileUnboundInternal(
+ Isolate* v8_isolate, Source* source, CompileOptions options,
+ bool is_module) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileUnbound()",
return Local<UnboundScript>());
@@ -1689,9 +1524,11 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
ENTER_V8(isolate);
i::SharedFunctionInfo* raw_result = NULL;
{ i::HandleScope scope(isolate);
+ i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
i::Handle<i::Object> name_obj;
int line_offset = 0;
int column_offset = 0;
+ bool is_embedder_debug_script = false;
bool is_shared_cross_origin = false;
if (!source->resource_name.IsEmpty()) {
name_obj = Utils::OpenHandle(*(source->resource_name));
@@ -1704,15 +1541,18 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
static_cast<int>(source->resource_column_offset->Value());
}
if (!source->resource_is_shared_cross_origin.IsEmpty()) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
is_shared_cross_origin =
- source->resource_is_shared_cross_origin == v8::True(v8_isolate);
+ source->resource_is_shared_cross_origin->IsTrue();
+ }
+ if (!source->resource_is_embedder_debug_script.IsEmpty()) {
+ is_embedder_debug_script =
+ source->resource_is_embedder_debug_script->IsTrue();
}
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::SharedFunctionInfo> result = i::Compiler::CompileScript(
- str, name_obj, line_offset, column_offset, is_shared_cross_origin,
- isolate->native_context(), NULL, &script_data, options,
- i::NOT_NATIVES_CODE);
+ str, name_obj, line_offset, column_offset, is_embedder_debug_script,
+ is_shared_cross_origin, isolate->native_context(), NULL, &script_data,
+ options, i::NOT_NATIVES_CODE, is_module);
has_pending_exception = result.is_null();
if (has_pending_exception && script_data != NULL) {
// This case won't happen during normal operation; we have compiled
@@ -1741,13 +1581,20 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
}
+Local<UnboundScript> ScriptCompiler::CompileUnbound(Isolate* v8_isolate,
+ Source* source,
+ CompileOptions options) {
+ return CompileUnboundInternal(v8_isolate, source, options, false);
+}
+
+
Local<Script> ScriptCompiler::Compile(
Isolate* v8_isolate,
Source* source,
CompileOptions options) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", return Local<Script>());
- LOG_API(isolate, "ScriptCompiler::CompiletBound()");
+ LOG_API(isolate, "ScriptCompiler::CompileBound()");
ENTER_V8(isolate);
Local<UnboundScript> generic = CompileUnbound(v8_isolate, source, options);
if (generic.IsEmpty()) return Local<Script>();
@@ -1755,6 +1602,146 @@ Local<Script> ScriptCompiler::Compile(
}
+Local<Script> ScriptCompiler::CompileModule(Isolate* v8_isolate, Source* source,
+ CompileOptions options) {
+ CHECK(i::FLAG_harmony_modules);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileModule()",
+ return Local<Script>());
+ LOG_API(isolate, "ScriptCompiler::CompileModule()");
+ ENTER_V8(isolate);
+ Local<UnboundScript> generic =
+ CompileUnboundInternal(v8_isolate, source, options, true);
+ if (generic.IsEmpty()) return Local<Script>();
+ return generic->BindToCurrentContext();
+}
+
+
+class IsIdentifierHelper {
+ public:
+ IsIdentifierHelper() : is_identifier_(false), first_char_(true) {}
+
+ bool Check(i::String* string) {
+ i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
+ if (cons_string == NULL) return is_identifier_;
+ // We don't support cons strings here.
+ return false;
+ }
+ void VisitOneByteString(const uint8_t* chars, int length) {
+ for (int i = 0; i < length; ++i) {
+ if (first_char_) {
+ first_char_ = false;
+ is_identifier_ = unicode_cache_.IsIdentifierStart(chars[0]);
+ } else {
+ is_identifier_ &= unicode_cache_.IsIdentifierPart(chars[i]);
+ }
+ }
+ }
+ void VisitTwoByteString(const uint16_t* chars, int length) {
+ for (int i = 0; i < length; ++i) {
+ if (first_char_) {
+ first_char_ = false;
+ is_identifier_ = unicode_cache_.IsIdentifierStart(chars[0]);
+ } else {
+ is_identifier_ &= unicode_cache_.IsIdentifierPart(chars[i]);
+ }
+ }
+ }
+
+ private:
+ bool is_identifier_;
+ bool first_char_;
+ i::UnicodeCache unicode_cache_;
+ DISALLOW_COPY_AND_ASSIGN(IsIdentifierHelper);
+};
+
+
+Local<Function> ScriptCompiler::CompileFunctionInContext(
+ Isolate* v8_isolate, Source* source, Local<Context> v8_context,
+ size_t arguments_count, Local<String> arguments[],
+ size_t context_extension_count, Local<Object> context_extensions[]) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileFunctionInContext()",
+ return Local<Function>());
+ LOG_API(isolate, "ScriptCompiler::CompileFunctionInContext()");
+ ENTER_V8(isolate);
+
+ i::Handle<i::String> source_string;
+ if (arguments_count) {
+ source_string =
+ Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "(function("));
+ for (size_t i = 0; i < arguments_count; ++i) {
+ IsIdentifierHelper helper;
+ if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
+ return Local<Function>();
+ }
+ i::MaybeHandle<i::String> maybe_source =
+ isolate->factory()->NewConsString(source_string,
+ Utils::OpenHandle(*arguments[i]));
+ if (!maybe_source.ToHandle(&source_string)) {
+ return Local<Function>();
+ }
+ if (i + 1 == arguments_count) continue;
+ maybe_source = isolate->factory()->NewConsString(
+ source_string,
+ isolate->factory()->LookupSingleCharacterStringFromCode(','));
+ if (!maybe_source.ToHandle(&source_string)) {
+ return Local<Function>();
+ }
+ }
+ i::Handle<i::String> brackets =
+ Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "){"));
+ i::MaybeHandle<i::String> maybe_source =
+ isolate->factory()->NewConsString(source_string, brackets);
+ if (!maybe_source.ToHandle(&source_string)) {
+ return Local<Function>();
+ }
+ } else {
+ source_string =
+ Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "(function(){"));
+ }
+
+ int scope_position = source_string->length();
+ i::MaybeHandle<i::String> maybe_source = isolate->factory()->NewConsString(
+ source_string, Utils::OpenHandle(*source->source_string));
+ if (!maybe_source.ToHandle(&source_string)) {
+ return Local<Function>();
+ }
+ // Include \n in case the source contains a line end comment.
+ i::Handle<i::String> brackets =
+ Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "\n})"));
+ maybe_source = isolate->factory()->NewConsString(source_string, brackets);
+ if (!maybe_source.ToHandle(&source_string)) {
+ return Local<Function>();
+ }
+
+ i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
+ i::Handle<i::SharedFunctionInfo> outer_info(context->closure()->shared(),
+ isolate);
+ for (size_t i = 0; i < context_extension_count; ++i) {
+ i::Handle<i::JSObject> extension =
+ Utils::OpenHandle(*context_extensions[i]);
+ i::Handle<i::JSFunction> closure(context->closure(), isolate);
+ context = isolate->factory()->NewWithContext(closure, context, extension);
+ }
+
+ EXCEPTION_PREAMBLE(isolate);
+ i::MaybeHandle<i::JSFunction> maybe_fun = i::Compiler::GetFunctionFromEval(
+ source_string, outer_info, context, i::SLOPPY,
+ i::ONLY_SINGLE_FUNCTION_LITERAL, scope_position);
+ i::Handle<i::JSFunction> fun;
+ has_pending_exception = !maybe_fun.ToHandle(&fun);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Function>());
+
+ i::MaybeHandle<i::Object> result = i::Execution::Call(
+ isolate, fun, Utils::OpenHandle(*v8_context->Global()), 0, NULL);
+ i::Handle<i::Object> final_result;
+ has_pending_exception = !result.ToHandle(&final_result);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Function>());
+ return Utils::ToLocal(i::Handle<i::JSFunction>::cast(final_result));
+}
+
+
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@@ -1790,8 +1777,12 @@ Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
static_cast<int>(origin.ResourceColumnOffset()->Value())));
}
if (!origin.ResourceIsSharedCrossOrigin().IsEmpty()) {
- script->set_is_shared_cross_origin(origin.ResourceIsSharedCrossOrigin() ==
- v8::True(v8_isolate));
+ script->set_is_shared_cross_origin(
+ origin.ResourceIsSharedCrossOrigin()->IsTrue());
+ }
+ if (!origin.ResourceIsEmbedderDebugScript().IsEmpty()) {
+ script->set_is_embedder_debug_script(
+ origin.ResourceIsEmbedderDebugScript()->IsTrue());
}
source->info->set_script(script);
source->info->SetContext(isolate->native_context());
@@ -1800,8 +1791,8 @@ Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
- source->parser->Internalize();
- source->parser->HandleSourceURLComments();
+ source->parser->Internalize(source->info.get());
+ source->parser->HandleSourceURLComments(source->info.get());
i::Handle<i::SharedFunctionInfo> result =
i::Handle<i::SharedFunctionInfo>::null();
@@ -1965,15 +1956,17 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
i::HandleScope scope(isolate_);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate_->factory()->stack_string();
+ {
+ EXCEPTION_PREAMBLE(isolate_);
+ Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
+ has_pending_exception = !maybe.has_value;
+ EXCEPTION_BAILOUT_CHECK(isolate_, v8::Local<Value>());
+ if (!maybe.value) return v8::Local<Value>();
+ }
+ i::Handle<i::Object> value;
EXCEPTION_PREAMBLE(isolate_);
- Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
- has_pending_exception = !maybe.has_value;
+ has_pending_exception = !i::Object::GetProperty(obj, name).ToHandle(&value);
EXCEPTION_BAILOUT_CHECK(isolate_, v8::Local<Value>());
- if (!maybe.value) return v8::Local<Value>();
- i::Handle<i::Object> value;
- if (!i::Object::GetProperty(obj, name).ToHandle(&value)) {
- return v8::Local<Value>();
- }
return v8::Utils::ToLocal(scope.CloseAndEscape(value));
} else {
return v8::Local<Value>();
@@ -2054,8 +2047,9 @@ ScriptOrigin Message::GetScriptOrigin() const {
Utils::ToLocal(scriptName),
v8::Integer::New(v8_isolate, script->line_offset()->value()),
v8::Integer::New(v8_isolate, script->column_offset()->value()),
- Handle<Boolean>(),
- v8::Integer::New(v8_isolate, script->id()->value()));
+ v8::Boolean::New(v8_isolate, script->is_shared_cross_origin()),
+ v8::Integer::New(v8_isolate, script->id()->value()),
+ v8::Boolean::New(v8_isolate, script->is_embedder_debug_script()));
return origin;
}
@@ -2328,6 +2322,101 @@ bool StackFrame::IsConstructor() const {
}
+// --- N a t i v e W e a k M a p ---
+
+Local<NativeWeakMap> NativeWeakMap::New(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ i::Handle<i::JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
+ i::Runtime::WeakCollectionInitialize(isolate, weakmap);
+ return Utils::NativeWeakMapToLocal(weakmap);
+}
+
+
+void NativeWeakMap::Set(Handle<Value> v8_key, Handle<Value> v8_value) {
+ i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
+ i::Isolate* isolate = weak_collection->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
+ i::Handle<i::Object> value = Utils::OpenHandle(*v8_value);
+ if (!key->IsJSReceiver() && !key->IsSymbol()) {
+ DCHECK(false);
+ return;
+ }
+ i::Handle<i::ObjectHashTable> table(
+ i::ObjectHashTable::cast(weak_collection->table()));
+ if (!table->IsKey(*key)) {
+ DCHECK(false);
+ return;
+ }
+ i::Runtime::WeakCollectionSet(weak_collection, key, value);
+}
+
+
+Local<Value> NativeWeakMap::Get(Handle<Value> v8_key) {
+ i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
+ i::Isolate* isolate = weak_collection->GetIsolate();
+ ENTER_V8(isolate);
+ i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
+ if (!key->IsJSReceiver() && !key->IsSymbol()) {
+ DCHECK(false);
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ i::Handle<i::ObjectHashTable> table(
+ i::ObjectHashTable::cast(weak_collection->table()));
+ if (!table->IsKey(*key)) {
+ DCHECK(false);
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ i::Handle<i::Object> lookup(table->Lookup(key), isolate);
+ if (lookup->IsTheHole())
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ return Utils::ToLocal(lookup);
+}
+
+
+bool NativeWeakMap::Has(Handle<Value> v8_key) {
+ i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
+ i::Isolate* isolate = weak_collection->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
+ if (!key->IsJSReceiver() && !key->IsSymbol()) {
+ DCHECK(false);
+ return false;
+ }
+ i::Handle<i::ObjectHashTable> table(
+ i::ObjectHashTable::cast(weak_collection->table()));
+ if (!table->IsKey(*key)) {
+ DCHECK(false);
+ return false;
+ }
+ i::Handle<i::Object> lookup(table->Lookup(key), isolate);
+ return !lookup->IsTheHole();
+}
+
+
+bool NativeWeakMap::Delete(Handle<Value> v8_key) {
+ i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
+ i::Isolate* isolate = weak_collection->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
+ if (!key->IsJSReceiver() && !key->IsSymbol()) {
+ DCHECK(false);
+ return false;
+ }
+ i::Handle<i::ObjectHashTable> table(
+ i::ObjectHashTable::cast(weak_collection->table()));
+ if (!table->IsKey(*key)) {
+ DCHECK(false);
+ return false;
+ }
+ return i::Runtime::WeakCollectionDelete(weak_collection, key);
+}
+
+
// --- J S O N ---
Local<Value> JSON::Parse(Local<String> json_string) {
@@ -2886,7 +2975,7 @@ double Value::NumberValue() const {
EXCEPTION_PREAMBLE(isolate);
has_pending_exception = !i::Execution::ToNumber(
isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, base::OS::nan_value());
+ EXCEPTION_BAILOUT_CHECK(isolate, std::numeric_limits<double>::quiet_NaN());
}
return num->Number();
}
@@ -2980,8 +3069,8 @@ Local<Uint32> Value::ToArrayIndex() const {
int32_t Value::Int32Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
+ if (obj->IsNumber()) {
+ return NumberToInt32(*obj);
} else {
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "Int32Value (slow)");
@@ -3081,8 +3170,8 @@ bool Value::SameValue(Handle<Value> that) const {
uint32_t Value::Uint32Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
+ if (obj->IsNumber()) {
+ return NumberToUint32(*obj);
} else {
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "Uint32Value");
@@ -3162,7 +3251,7 @@ bool v8::Object::SetPrivate(v8::Handle<Private> key, v8::Handle<Value> value) {
i::MaybeHandle<i::Object> DeleteObjectProperty(
i::Isolate* isolate, i::Handle<i::JSReceiver> receiver,
- i::Handle<i::Object> key, i::JSReceiver::DeleteMode mode) {
+ i::Handle<i::Object> key, i::LanguageMode language_mode) {
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
@@ -3176,7 +3265,7 @@ i::MaybeHandle<i::Object> DeleteObjectProperty(
return isolate->factory()->true_value();
}
- return i::JSReceiver::DeleteElement(receiver, index, mode);
+ return i::JSReceiver::DeleteElement(receiver, index, language_mode);
}
i::Handle<i::Name> name;
@@ -3194,33 +3283,7 @@ i::MaybeHandle<i::Object> DeleteObjectProperty(
if (name->IsString()) {
name = i::String::Flatten(i::Handle<i::String>::cast(name));
}
- return i::JSReceiver::DeleteProperty(receiver, name, mode);
-}
-
-
-bool v8::Object::ForceDelete(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ForceDelete()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
-
- // When deleting a property on the global object using ForceDelete
- // deoptimize all functions as optimized code does not check for the hole
- // value with DontDelete properties. We have to deoptimize all contexts
- // because of possible cross-context inlined functions.
- if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
- i::Deoptimizer::DeoptimizeAll(isolate);
- }
-
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj;
- has_pending_exception =
- !DeleteObjectProperty(isolate, self, key_obj,
- i::JSReceiver::FORCE_DELETION).ToHandle(&obj);
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return obj->IsTrue();
+ return i::JSReceiver::DeleteProperty(receiver, name, language_mode);
}
@@ -3395,37 +3458,6 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
}
-static bool GetPredefinedToString(i::Handle<i::String> tag,
- Local<String>* result) {
- i::Isolate* i_isolate = tag->GetIsolate();
- Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
- i::Factory* factory = i_isolate->factory();
-
- if (i::String::Equals(tag, factory->Arguments_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~Arguments]");
- } else if (i::String::Equals(tag, factory->Array_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~Array]");
- } else if (i::String::Equals(tag, factory->Boolean_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~Boolean]");
- } else if (i::String::Equals(tag, factory->Date_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~Date]");
- } else if (i::String::Equals(tag, factory->Error_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~Error]");
- } else if (i::String::Equals(tag, factory->Function_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~Function]");
- } else if (i::String::Equals(tag, factory->Number_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~Number]");
- } else if (i::String::Equals(tag, factory->RegExp_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~RegExp]");
- } else if (i::String::Equals(tag, factory->String_string())) {
- *result = v8::String::NewFromUtf8(isolate, "[object ~String]");
- } else {
- return false;
- }
- return true;
-}
-
-
Local<String> v8::Object::ObjectProtoToString() {
i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
@@ -3459,16 +3491,8 @@ Local<String> v8::Object::ObjectProtoToString() {
.ToHandle(&tag);
EXCEPTION_BAILOUT_CHECK(i_isolate, Local<v8::String>());
- if (!tag->IsUndefined()) {
- if (!tag->IsString())
- return v8::String::NewFromUtf8(isolate, "[object ???]");
- i::Handle<i::String> tag_name = i::Handle<i::String>::cast(tag);
- if (!i::String::Equals(class_name, tag_name)) {
- Local<String> result;
- if (GetPredefinedToString(tag_name, &result)) return result;
-
- class_name = tag_name;
- }
+ if (tag->IsString()) {
+ class_name = i::Handle<i::String>::cast(tag);
}
}
const char* prefix = "[object ";
@@ -3524,8 +3548,7 @@ bool v8::Object::Delete(v8::Handle<Value> key) {
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj;
has_pending_exception =
- !DeleteObjectProperty(isolate, self, key_obj,
- i::JSReceiver::NORMAL_DELETION).ToHandle(&obj);
+ !DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return obj->IsTrue();
}
@@ -3650,16 +3673,6 @@ bool Object::SetAccessor(Handle<Name> name,
}
-bool Object::SetDeclaredAccessor(Local<Name> name,
- Local<DeclaredAccessorDescriptor> descriptor,
- PropertyAttribute attributes,
- AccessControl settings) {
- void* null = NULL;
- return ObjectSetAccessor(
- this, name, descriptor, null, null, settings, attributes);
-}
-
-
void Object::SetAccessorProperty(Local<Name> name,
Local<Function> getter,
Handle<Function> setter,
@@ -4262,7 +4275,10 @@ ScriptOrigin Function::GetScriptOrigin() const {
v8::ScriptOrigin origin(
Utils::ToLocal(scriptName),
v8::Integer::New(isolate, script->line_offset()->value()),
- v8::Integer::New(isolate, script->column_offset()->value()));
+ v8::Integer::New(isolate, script->column_offset()->value()),
+ v8::Boolean::New(isolate, script->is_shared_cross_origin()),
+ v8::Integer::New(isolate, script->id()->value()),
+ v8::Boolean::New(isolate, script->is_embedder_debug_script()));
return origin;
}
return v8::ScriptOrigin(Handle<Value>());
@@ -5109,7 +5125,7 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
if (!InternalFieldOK(obj, index, location)) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
obj->SetInternalField(index, *val);
- DCHECK_EQ(value, GetInternalField(index));
+ DCHECK(value->Equals(GetInternalField(index)));
}
@@ -5370,7 +5386,8 @@ Local<v8::Object> ObjectTemplate::NewInstance() {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj;
- has_pending_exception = !i::Execution::InstantiateObject(info).ToHandle(&obj);
+ has_pending_exception =
+ !i::ApiNatives::InstantiateObject(info).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
}
@@ -5386,7 +5403,7 @@ Local<v8::Function> FunctionTemplate::GetFunction() {
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj;
has_pending_exception =
- !i::Execution::InstantiateFunction(info).ToHandle(&obj);
+ !i::ApiNatives::InstantiateFunction(info).ToHandle(&obj);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
}
@@ -5478,20 +5495,19 @@ inline Local<String> NewString(Isolate* v8_isolate,
String::NewStringType type,
int length) {
i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, location, return Local<String>());
LOG_API(isolate, env);
- if (length == 0 && type != String::kUndetectableString) {
+ if (length == 0) {
return String::Empty(v8_isolate);
}
ENTER_V8(isolate);
if (length == -1) length = StringLength(data);
- // We do not expect this to fail. Change this if it does.
- i::Handle<i::String> result = NewString(
- isolate->factory(),
- type,
- i::Vector<const Char>(data, length)).ToHandleChecked();
- if (type == String::kUndetectableString) {
- result->MarkAsUndetectable();
- }
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::String> result;
+ has_pending_exception =
+ !NewString(isolate->factory(), type, i::Vector<const Char>(data, length))
+ .ToHandle(&result);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
return Utils::ToLocal(result);
}
@@ -5769,7 +5785,7 @@ Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
LOG_API(i_isolate, "Date::New");
if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- time = base::OS::nan_value();
+ time = std::numeric_limits<double>::quiet_NaN();
}
ENTER_V8(i_isolate);
EXCEPTION_PREAMBLE(i_isolate);
@@ -6290,7 +6306,7 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (std::isnan(value)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- value = base::OS::nan_value();
+ value = std::numeric_limits<double>::quiet_NaN();
}
ENTER_V8(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
@@ -7470,6 +7486,11 @@ void HeapSnapshot::Serialize(OutputStream* stream,
}
+// static
+STATIC_CONST_MEMBER_DEFINITION const SnapshotObjectId
+ HeapProfiler::kUnknownObjectId;
+
+
int HeapProfiler::GetSnapshotCount() {
return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotsCount();
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 62f9f1e0b3..d9e3bbab9c 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -135,43 +135,42 @@ class RegisteredExtension {
};
-#define OPEN_HANDLE_LIST(V) \
- V(Template, TemplateInfo) \
- V(FunctionTemplate, FunctionTemplateInfo) \
- V(ObjectTemplate, ObjectTemplateInfo) \
- V(Signature, SignatureInfo) \
- V(AccessorSignature, FunctionTemplateInfo) \
- V(TypeSwitch, TypeSwitchInfo) \
- V(Data, Object) \
- V(RegExp, JSRegExp) \
- V(Object, JSObject) \
- V(Array, JSArray) \
- V(ArrayBuffer, JSArrayBuffer) \
- V(ArrayBufferView, JSArrayBufferView) \
- V(TypedArray, JSTypedArray) \
- V(Uint8Array, JSTypedArray) \
- V(Uint8ClampedArray, JSTypedArray) \
- V(Int8Array, JSTypedArray) \
- V(Uint16Array, JSTypedArray) \
- V(Int16Array, JSTypedArray) \
- V(Uint32Array, JSTypedArray) \
- V(Int32Array, JSTypedArray) \
- V(Float32Array, JSTypedArray) \
- V(Float64Array, JSTypedArray) \
- V(DataView, JSDataView) \
- V(Name, Name) \
- V(String, String) \
- V(Symbol, Symbol) \
- V(Script, JSFunction) \
- V(UnboundScript, SharedFunctionInfo) \
- V(Function, JSFunction) \
- V(Message, JSMessageObject) \
- V(Context, Context) \
- V(External, Object) \
- V(StackTrace, JSArray) \
- V(StackFrame, JSObject) \
- V(DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
-
+#define OPEN_HANDLE_LIST(V) \
+ V(Template, TemplateInfo) \
+ V(FunctionTemplate, FunctionTemplateInfo) \
+ V(ObjectTemplate, ObjectTemplateInfo) \
+ V(Signature, FunctionTemplateInfo) \
+ V(AccessorSignature, FunctionTemplateInfo) \
+ V(TypeSwitch, TypeSwitchInfo) \
+ V(Data, Object) \
+ V(RegExp, JSRegExp) \
+ V(Object, JSObject) \
+ V(Array, JSArray) \
+ V(ArrayBuffer, JSArrayBuffer) \
+ V(ArrayBufferView, JSArrayBufferView) \
+ V(TypedArray, JSTypedArray) \
+ V(Uint8Array, JSTypedArray) \
+ V(Uint8ClampedArray, JSTypedArray) \
+ V(Int8Array, JSTypedArray) \
+ V(Uint16Array, JSTypedArray) \
+ V(Int16Array, JSTypedArray) \
+ V(Uint32Array, JSTypedArray) \
+ V(Int32Array, JSTypedArray) \
+ V(Float32Array, JSTypedArray) \
+ V(Float64Array, JSTypedArray) \
+ V(DataView, JSDataView) \
+ V(Name, Name) \
+ V(String, String) \
+ V(Symbol, Symbol) \
+ V(Script, JSFunction) \
+ V(UnboundScript, SharedFunctionInfo) \
+ V(Function, JSFunction) \
+ V(Message, JSMessageObject) \
+ V(Context, Context) \
+ V(External, Object) \
+ V(StackTrace, JSArray) \
+ V(StackFrame, JSObject) \
+ V(NativeWeakMap, JSWeakMap)
class Utils {
public:
@@ -249,16 +248,16 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<ObjectTemplate> ToLocal(
v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
- static inline Local<Signature> ToLocal(
- v8::internal::Handle<v8::internal::SignatureInfo> obj);
+ static inline Local<Signature> SignatureToLocal(
+ v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<AccessorSignature> AccessorSignatureToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<DeclaredAccessorDescriptor> ToLocal(
- v8::internal::Handle<v8::internal::DeclaredAccessorDescriptor> obj);
+ static inline Local<NativeWeakMap> NativeWeakMapToLocal(
+ v8::internal::Handle<v8::internal::JSWeakMap> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@@ -354,7 +353,7 @@ TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
-MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
+MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
@@ -365,7 +364,7 @@ MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
-MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
+MAKE_TO_LOCAL(NativeWeakMapToLocal, JSWeakMap, NativeWeakMap)
#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
@@ -373,14 +372,14 @@ MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
// Implementations of OpenHandle
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
- const v8::From* that, bool allow_empty_handle) { \
- EXTRA_CHECK(allow_empty_handle || that != NULL); \
- EXTRA_CHECK(that == NULL || \
- (*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
- return v8::internal::Handle<v8::internal::To>( \
- reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
+ const v8::From* that, bool allow_empty_handle) { \
+ DCHECK(allow_empty_handle || that != NULL); \
+ DCHECK(that == NULL || \
+ (*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
+ return v8::internal::Handle<v8::internal::To>( \
+ reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
}
OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
deleted file mode 100644
index 3e38d10035..0000000000
--- a/deps/v8/src/apinatives.js
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-"use strict";
-
-// This file contains infrastructure used by the API. See
-// v8natives.js for an explanation of these files are processed and
-// loaded.
-
-
-function CreateDate(time) {
- var date = new $Date();
- date.setTime(time);
- return date;
-}
-
-
-var kApiFunctionCache = new InternalArray();
-var functionCache = kApiFunctionCache;
-
-
-function Instantiate(data, name) {
- if (!%IsTemplate(data)) return data;
- var tag = %GetTemplateField(data, kApiTagOffset);
- switch (tag) {
- case kFunctionTag:
- return InstantiateFunction(data, name);
- case kNewObjectTag:
- var Constructor = %GetTemplateField(data, kApiConstructorOffset);
- // Note: Do not directly use a function template as a condition, our
- // internal ToBoolean doesn't handle that!
- var result;
- if (typeof Constructor === 'undefined') {
- result = {};
- ConfigureTemplateInstance(result, data);
- } else {
- // ConfigureTemplateInstance is implicitly called before calling the API
- // constructor in HandleApiCall.
- result = new (Instantiate(Constructor))();
- result = %ToFastProperties(result);
- }
- return result;
- default:
- throw 'Unknown API tag <' + tag + '>';
- }
-}
-
-
-function InstantiateFunction(data, name) {
- // We need a reference to kApiFunctionCache in the stack frame
- // if we need to bail out from a stack overflow.
- var cache = kApiFunctionCache;
- var serialNumber = %GetTemplateField(data, kApiSerialNumberOffset);
- var isFunctionCached =
- (serialNumber in cache) && (cache[serialNumber] != kUninitialized);
- if (!isFunctionCached) {
- try {
- var flags = %GetTemplateField(data, kApiFlagOffset);
- var prototype;
- if (!(flags & (1 << kRemovePrototypeBit))) {
- var template = %GetTemplateField(data, kApiPrototypeTemplateOffset);
- prototype = typeof template === 'undefined'
- ? {} : Instantiate(template);
-
- var parent = %GetTemplateField(data, kApiParentTemplateOffset);
- // Note: Do not directly use a function template as a condition, our
- // internal ToBoolean doesn't handle that!
- if (typeof parent !== 'undefined') {
- var parent_fun = Instantiate(parent);
- %InternalSetPrototype(prototype, parent_fun.prototype);
- }
- }
- var fun = %CreateApiFunction(data, prototype);
- if (IS_STRING(name)) %FunctionSetName(fun, name);
- var doNotCache = flags & (1 << kDoNotCacheBit);
- if (!doNotCache) cache[serialNumber] = fun;
- ConfigureTemplateInstance(fun, data);
- if (doNotCache) return fun;
- } catch (e) {
- cache[serialNumber] = kUninitialized;
- throw e;
- }
- }
- return cache[serialNumber];
-}
-
-
-function ConfigureTemplateInstance(obj, data) {
- var properties = %GetTemplateField(data, kApiPropertyListOffset);
- if (!properties) return;
- // Disable access checks while instantiating the object.
- var requires_access_checks = %DisableAccessChecks(obj);
- try {
- for (var i = 1; i < properties[0];) {
- var length = properties[i];
- if (length == 3) {
- var name = properties[i + 1];
- var prop_data = properties[i + 2];
- var attributes = properties[i + 3];
- var value = Instantiate(prop_data, name);
- %AddPropertyForTemplate(obj, name, value, attributes);
- } else if (length == 4 || length == 5) {
- // TODO(verwaest): The 5th value used to be access_control. Remove once
- // the bindings are updated.
- var name = properties[i + 1];
- var getter = properties[i + 2];
- var setter = properties[i + 3];
- var attribute = properties[i + 4];
- %DefineApiAccessorProperty(obj, name, getter, setter, attribute);
- } else {
- throw "Bad properties array";
- }
- i += length + 1;
- }
- } finally {
- if (requires_access_checks) %EnableAccessChecks(obj);
- }
-}
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index e6c6db569b..049a34f4ff 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -58,6 +58,10 @@ class Arguments BASE_EMBEDDED {
Object** arguments() { return arguments_; }
+ Object** lowest_address() { return &this->operator[](length() - 1); }
+
+ Object** highest_address() { return &this->operator[](0); }
+
private:
intptr_t length_;
Object** arguments_;
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 105d711c1f..7a091d0deb 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -228,7 +228,8 @@ const char* DwVfpRegister::AllocationIndexToString(int index) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = 0;
+// static
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
@@ -491,6 +492,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
+ reloc_info_writer.Finish();
if (!FLAG_enable_ool_constant_pool) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
@@ -796,14 +798,20 @@ int Assembler::target_at(int pos) {
// Emitted link to a label, not part of a branch.
return instr;
}
- DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & kImm24Mask) << 8) >> 6;
- if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
- ((instr & B24) != 0)) {
- // blx uses bit 24 to encode bit 2 of imm26
- imm26 += 2;
+ if ((instr & 7 * B25) == 5 * B25) {
+ int imm26 = ((instr & kImm24Mask) << 8) >> 6;
+ // b, bl, or blx imm24
+ if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
+ ((instr & B24) != 0)) {
+ // blx uses bit 24 to encode bit 2 of imm26
+ imm26 += 2;
+ }
+ return pos + kPcLoadDelta + imm26;
}
- return pos + kPcLoadDelta + imm26;
+ // Internal reference to the label.
+ DCHECK_EQ(7 * B25 | 1 * B0, instr & (7 * B25 | 1 * B0));
+ int imm26 = (((instr >> 1) & kImm24Mask) << 8) >> 6;
+ return pos + imm26;
}
@@ -877,19 +885,25 @@ void Assembler::target_at_put(int pos, int target_pos) {
}
return;
}
- int imm26 = target_pos - (pos + kPcLoadDelta);
- DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if (Instruction::ConditionField(instr) == kSpecialCondition) {
- // blx uses bit 24 to encode bit 2 of imm26
- DCHECK((imm26 & 1) == 0);
- instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
- } else {
- DCHECK((imm26 & 3) == 0);
- instr &= ~kImm24Mask;
+ if ((instr & 7 * B25) == 5 * B25) {
+ // b, bl, or blx imm24
+ int imm26 = target_pos - (pos + kPcLoadDelta);
+ if (Instruction::ConditionField(instr) == kSpecialCondition) {
+ // blx uses bit 24 to encode bit 2 of imm26
+ DCHECK((imm26 & 1) == 0);
+ instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
+ } else {
+ DCHECK((imm26 & 3) == 0);
+ instr &= ~kImm24Mask;
+ }
+ int imm24 = imm26 >> 2;
+ DCHECK(is_int24(imm24));
+ instr_at_put(pos, instr | (imm24 & kImm24Mask));
+ return;
}
- int imm24 = imm26 >> 2;
- DCHECK(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & kImm24Mask));
+ // Patch internal reference to label.
+ DCHECK_EQ(7 * B25 | 1 * B0, instr & (7 * B25 | 1 * B0));
+ instr_at_put(pos, reinterpret_cast<Instr>(buffer_ + target_pos));
}
@@ -3355,28 +3369,6 @@ bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
@@ -3417,9 +3409,16 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // None of our relocation types are pc relative pointing outside the code
- // buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries.
+ // Relocate internal references.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ if (it.rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
+ // Don't patch unbound internal references (bit 0 set); those are still
+ // hooked up in the Label chain and will be automatically patched once
+ // the label is bound.
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ if ((*p & 1 * B0) == 0) *p += pc_delta;
+ }
+ }
// Relocate pending relocation entries.
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
@@ -3463,6 +3462,37 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dd(Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ if (label->is_bound()) {
+ uint32_t data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
+ DCHECK_EQ(0u, data & 1 * B0);
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+ } else {
+ int target_pos;
+ if (label->is_linked()) {
+ // Point to previous instruction that uses the link.
+ target_pos = label->pos();
+ } else {
+ // First entry of the link chain points to itself.
+ target_pos = pc_offset();
+ }
+ label->link_to(pc_offset());
+ // Encode internal reference to unbound label. We set the least significant
+ // bit to distinguish unbound internal references in GrowBuffer() below.
+ int imm26 = target_pos - pc_offset();
+ DCHECK_EQ(0, imm26 & 3);
+ int imm24 = imm26 >> 2;
+ DCHECK(is_int24(imm24));
+ // We use bit pattern 0000111<imm24>1 because that doesn't match any branch
+ // or load that would also appear on the label chain.
+ emit(7 * B25 | ((imm24 & kImm24Mask) << 1) | 1 * B0);
+ }
+}
+
+
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) =
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 4a719e6aaf..7d1e0bdbee 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1398,6 +1398,10 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
+
// Record the emission of a constant pool.
//
// The emission of constant pool depends on the size of the code generated and
@@ -1423,6 +1427,7 @@ class Assembler : public AssemblerBase {
// are not emitted as part of the tables generated.
void db(uint8_t data);
void dd(uint32_t data);
+ void dd(Label* label);
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 8ce57d5544..d13d4ffa25 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -129,6 +129,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
+ __ mov(r3, r1);
// Run the native code for the Array function called as a normal function.
// tail call a stub
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
@@ -310,6 +311,36 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ push(r2);
+ }
+
+ __ push(r1); // argument for Runtime_NewObject
+ __ push(original_constructor); // original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(r4, r0);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ if (create_memento) {
+ __ jmp(count_incremented);
+ } else {
+ __ jmp(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -317,6 +348,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r2 : allocation site or undefined
+ // -- r3 : original constructor
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -331,7 +363,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
- __ AssertUndefinedOrAllocationSite(r2, r3);
+ __ AssertUndefinedOrAllocationSite(r2, r4);
__ push(r2);
}
@@ -340,9 +372,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
+ Label rt_call, allocated, normal_new, count_incremented;
+ __ cmp(r1, r3);
+ __ b(eq, &normal_new);
+
+ // Original constructor and function are different.
+ Generate_Runtime_NewObject(masm, create_memento, r3, &count_incremented,
+ &allocated);
+ __ bind(&normal_new);
+
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
@@ -569,27 +609,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r1: constructor function
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ push(r2);
- }
-
- __ push(r1); // argument for Runtime_NewObject
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ mov(r4, r0);
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
+ Generate_Runtime_NewObject(masm, create_memento, r1, &count_incremented,
+ &allocated);
// Receiver for constructor call allocated.
// r4: JSObject
@@ -721,6 +742,94 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- r2 : allocation site or undefined
+ // -- r3 : original constructor
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ __ mov(r4, r0);
+ __ SmiTag(r4);
+ __ push(r4); // Smi-tagged arguments count.
+
+ // Push new.target.
+ __ push(r3);
+
+ // receiver is the hole.
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ push(ip);
+
+ // Set up pointer to last argument.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ // r0: number of arguments
+ // r1: constructor function
+ // r2: address of last argument (caller sp)
+ // r4: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r4, r4, Operand(2), SetCC);
+ __ b(ge, &loop);
+
+ __ add(r0, r0, Operand(1));
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ mov(r2, Operand(debug_step_in_fp));
+ __ ldr(r2, MemOperand(r2));
+ __ tst(r2, r2);
+ __ b(eq, &skip_step_in);
+
+ __ Push(r0);
+ __ Push(r1);
+ __ Push(r1);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ Pop(r1);
+ __ Pop(r0);
+
+ __ bind(&skip_step_in);
+
+ // Call the function.
+ // r0: number of arguments
+ // r1: constructor function
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ // r0: result
+ // sp[0]: number of arguments (smi-tagged)
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ldr(r1, MemOperand(sp, 0));
+
+ // Leave construct frame.
+ }
+
+ __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index ef286bbe01..9188b58c32 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -561,7 +561,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -916,6 +916,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -1487,6 +1488,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1544,6 +1546,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[4] : receiver displacement
// sp[8] : function
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1572,6 +1576,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r6 : allocated object (tagged)
// r9 : mapped parameter count (tagged)
+ CHECK(!has_new_target());
+
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
// r1 = parameter count (tagged)
@@ -1812,6 +1818,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ if (has_new_target()) {
+ // Subtract 1 from smi-tagged arguments count.
+ __ sub(r1, r1, Operand(2));
+ }
__ str(r1, MemOperand(sp, 0));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -1891,6 +1901,30 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // sp[0] : index of rest parameter
+ // sp[4] : number of parameters
+ // sp[8] : receiver displacement
+
+ Label runtime;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 2 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2562,6 +2596,15 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r2, r5);
}
+ // Pass function as original constructor.
+ if (IsSuperConstructorCall()) {
+ __ mov(r4, Operand(1 * kPointerSize));
+ __ add(r4, r4, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r3, MemOperand(sp, r4));
+ } else {
+ __ mov(r3, r1);
+ }
+
// Jump to the function-specific construct stub.
Register jmp_reg = r4;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -2601,12 +2644,11 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id
+ // r2 - vector
Label miss;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, r2);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ b(ne, &miss);
@@ -2621,6 +2663,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ b(ne, &miss);
__ mov(r2, r4);
+ __ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -2641,6 +2684,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
+ // r2 - vector
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
@@ -2651,14 +2695,32 @@ void CallICStub::Generate(MacroAssembler* masm) {
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, r2);
-
// The checks. First, does r1 match the recorded monomorphic target?
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
- __ cmp(r1, r4);
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
+ __ cmp(r1, r5);
__ b(ne, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(r1, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2734,15 +2796,18 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
- // Store the function.
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r1, MemOperand(r4, 0));
+ // Store the function. Use a stub since we need a frame for allocation.
+ // r2 - vector
+ // r3 - slot
+ // r1 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r1);
+ __ CallStub(&create_stub);
+ __ Pop(r1);
+ }
- // Update the write barrier.
- __ mov(r5, r1);
- __ RecordWrite(r2, r4, r5, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
@@ -2764,26 +2829,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ ldr(r4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
-
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
- __ Push(r4, r1, r2, r3);
+ // Push the receiver and the function and feedback info.
+ __ Push(r1, r2, r3);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to edi and exit the internal frame.
- __ mov(r1, r0);
- }
+ // Move result to edi and exit the internal frame.
+ __ mov(r1, r0);
}
@@ -3660,13 +3719,15 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
+ __ GetWeakValue(r4, cell);
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r2, Operand(known_map_));
+ __ cmp(r2, r4);
__ b(ne, &miss);
- __ cmp(r3, Operand(known_map_));
+ __ cmp(r3, r4);
__ b(ne, &miss);
__ sub(r0, r0, Operand(r1));
@@ -3744,7 +3805,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
@@ -4249,6 +4310,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r2);
+ CallICStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r2);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4499,6 +4574,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r0 : argc (only if argument_count() == ANY)
// -- r1 : constructor
// -- r2 : AllocationSite or undefined
+ // -- r3 : original constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -4519,6 +4595,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r2, r4);
}
+ Label subclassing;
+ __ cmp(r3, r1);
+ __ b(ne, &subclassing);
+
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
@@ -4532,6 +4612,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ __ bind(&subclassing);
+ __ push(r1);
+ __ push(r3);
+
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ add(r0, r0, Operand(2));
+ break;
+ case NONE:
+ __ mov(r0, Operand(2));
+ break;
+ case ONE:
+ __ mov(r0, Operand(3));
+ break;
+ }
+
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
@@ -4610,12 +4711,158 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address.is(r1) || function_address.is(r2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ ldrb(r9, MemOperand(r9, 0));
+ __ cmp(r9, Operand(0));
+ __ b(eq, &profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ __ mov(r3, Operand(thunk_ref));
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ __ Move(r3, function_address);
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ __ mov(r9, Operand(next_address));
+ __ ldr(r4, MemOperand(r9, kNextOffset));
+ __ ldr(r5, MemOperand(r9, kLimitOffset));
+ __ ldr(r6, MemOperand(r9, kLevelOffset));
+ __ add(r6, r6, Operand(1));
+ __ str(r6, MemOperand(r9, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r0);
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, r3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r0);
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // load value from ReturnValue
+ __ ldr(r0, return_value_operand);
+ __ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ str(r4, MemOperand(r9, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ ldr(r1, MemOperand(r9, kLevelOffset));
+ __ cmp(r1, r6);
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ sub(r6, r6, Operand(1));
+ __ str(r6, MemOperand(r9, kLevelOffset));
+ __ ldr(ip, MemOperand(r9, kLimitOffset));
+ __ cmp(r5, ip);
+ __ b(ne, &delete_allocated_handles);
+
+ // Check if the function scheduled an exception.
+ __ bind(&leave_exit_frame);
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ ldr(r5, MemOperand(ip));
+ __ cmp(r4, r5);
+ __ b(ne, &promote_scheduled_exception);
+ __ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ ldr(cp, *context_restore_operand);
+ }
+ // LeaveExitFrame expects unwind space to be in a register.
+ if (stack_space_operand != NULL) {
+ __ ldr(r4, *stack_space_operand);
+ } else {
+ __ mov(r4, Operand(stack_space));
+ }
+ __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
+ __ mov(pc, lr);
+
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ str(r5, MemOperand(r9, kLimitOffset));
+ __ mov(r4, r0);
+ __ PrepareCallCFunction(1, r5);
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ mov(r0, r4);
+ __ jmp(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
// -- r0 : callee
// -- r4 : call_data
// -- r2 : holder
// -- r1 : api_function_address
+ // -- r3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -4630,10 +4877,6 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = r1;
Register context = cp;
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
-
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
@@ -4645,6 +4888,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || r3.is(argc.reg()));
+
// context save
__ push(context);
// load context from callee
@@ -4665,8 +4910,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// return value default
__ push(scratch);
// isolate
- __ mov(scratch,
- Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch);
// holder
__ push(holder);
@@ -4687,37 +4931,70 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ add(r0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ str(scratch, MemOperand(r0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ mov(ip, Operand(argc));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ if (argc.is_immediate()) {
+ // FunctionCallbackInfo::values_
+ __ add(ip, scratch,
+ Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ mov(ip, Operand(argc.immediate()));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ mov(ip, Operand::Zero());
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
+ } else {
+ // FunctionCallbackInfo::values_
+ __ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
+ __ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_
+ __ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
+ __ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
+ }
+
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
+ MemOperand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_operand, return_value_operand,
+ &context_restore_operand);
+}
+
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- return_value_operand,
- &context_restore_operand);
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
+ call_data_undefined);
+}
+
+
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -4748,11 +5025,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, NULL,
+ MemOperand(fp, 6 * kPointerSize), NULL);
}
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 0037ce18ac..af68fb24e2 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -172,6 +172,7 @@ enum {
U = 1 << 23, // Positive (or negative) offset/index.
P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
I = 1 << 25, // Immediate shifter operand (or not).
+ B0 = 1 << 0,
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index b3a6173cf0..be05344e16 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -165,6 +165,9 @@ void Deoptimizer::EntryGenerator::Generate() {
// handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+ __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ str(fp, MemOperand(ip));
+
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 3dc54203b1..7311d7e8c9 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -126,7 +126,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -153,7 +153,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -197,7 +197,7 @@ void FullCodeGenerator::Generate() {
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(r1);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -240,6 +240,25 @@ void FullCodeGenerator::Generate() {
}
}
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(r2, Operand(Smi::FromInt(num_parameters)));
+ __ mov(r1, Operand(Smi::FromInt(rest_index)));
+ __ Push(r3, r2, r1);
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, r0, r1, r2);
+ }
+
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -262,15 +281,19 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, r0, r1, r2);
@@ -450,7 +473,11 @@ void FullCodeGenerator::EmitReturnSequence() {
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int32_t sp_delta = arg_count * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
@@ -930,15 +957,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(r1, scope_->ContextChainLength(scope_->ScriptScope()));
- __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
+ __ ldr(r1, ContextOperand(r1, descriptor->Index()));
__ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
// Assign it.
@@ -1260,6 +1288,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1299,7 +1328,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1537,6 +1566,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1686,11 +1720,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ push(r0); // Save result on stack
@@ -1745,18 +1781,19 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ldr(r0, MemOperand(sp));
__ push(r0);
VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- __ Drop(2);
- }
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1778,6 +1815,69 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r0); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
+ __ push(r0);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ mov(r0, Operand(Smi::FromInt(NONE)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ mov(r0, Operand(Smi::FromInt(NONE)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ mov(r0, Operand(Smi::FromInt(NONE)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ ldr(r0, MemOperand(sp));
@@ -1830,6 +1930,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1969,19 +2070,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(r0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
- mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -2372,7 +2469,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left_expr,
Expression* right_expr) {
Label done, smi_case, stub_call;
@@ -2392,7 +2488,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2478,9 +2574,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ ldr(scratch, MemOperand(sp, kPointerSize)); // constructor
@@ -2488,24 +2582,29 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ ldr(scratch, MemOperand(sp, 0)); // prototype
}
__ push(scratch);
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
default:
@@ -2521,11 +2620,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(r1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2600,7 +2697,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2669,8 +2766,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
// Assignment to var.
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(strict_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
+ __ Push(cp, r1, r0); // Context, name, language mode.
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
@@ -2685,7 +2782,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
@@ -2719,8 +2816,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(r0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2732,9 +2829,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(r0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2746,7 +2844,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r0));
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2769,8 +2868,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(r0);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2785,8 +2882,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(r0);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(r0);
}
@@ -2945,9 +3043,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2975,8 +3072,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
- // r2: strict mode.
- __ mov(r2, Operand(Smi::FromInt(strict_mode())));
+ // r2: language mode.
+ __ mov(r2, Operand(Smi::FromInt(language_mode())));
// r1: the start position of the scope the calls resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
@@ -2988,8 +3085,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(r0);
__ CallRuntime(Runtime::kGetPrototype, 1);
@@ -3112,11 +3208,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ Push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3145,12 +3237,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ Push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3184,6 +3272,66 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ if (!ValidateSuperCall(expr)) return;
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into r1 and r0.
+ __ mov(r0, Operand(arg_count));
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ RecordJSReturnSite(expr);
+
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(r1, this_var);
+ __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ b(eq, &uninitialized_this);
+ __ mov(r0, Operand(this_var->name()));
+ __ Push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3707,7 +3855,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -4038,6 +4186,61 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ Push(result_register());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor_frame);
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ mov(r0, Operand::Zero());
+ __ b(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(r1, r1);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ sub(r1, r1, Operand(1));
+ __ mov(r0, r1);
+
+ // Get arguments pointer in r2.
+ __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
+ __ add(r2, r2, Operand(StandardFrameConstants::kCallerSPOffset));
+ Label loop;
+ __ bind(&loop);
+ // Pre-decrement r2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
+ __ Push(r3);
+ __ sub(r1, r1, Operand(1));
+ __ cmp(r1, Operand::Zero());
+ __ b(ne, &loop);
+ }
+
+ __ bind(&args_set_up);
+ __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(result_register());
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
@@ -4055,7 +4258,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4452,7 +4655,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ mov(r1, Operand(Smi::FromInt(strict_mode())));
+ __ mov(r1, Operand(Smi::FromInt(language_mode())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4460,7 +4663,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@@ -4676,6 +4879,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4711,8 +4915,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4780,7 +4983,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 6e77ee474a..da0cba9d10 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -98,7 +98,19 @@ void FastCloneShallowObjectDescriptor::Initialize(
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r3, r1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -125,6 +137,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r3, r2};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
@@ -318,6 +340,31 @@ void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
r4, // call_data
r2, // holder
r1, // api_function_address
+ r3, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ r0, // callee
+ r4, // call_data
+ r2, // holder
+ r1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index e5de950334..2e097f9302 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -255,6 +255,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -1243,7 +1257,15 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(r3);
+ vector = FixedTemp(r2);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, r0), instr);
}
@@ -1508,9 +1530,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ int32_t constant_value = 0;
if (right->IsConstant()) {
HConstant* constant = HConstant::cast(right);
- int32_t constant_value = constant->Integer32Value();
+ constant_value = constant->Integer32Value();
// Constants -1, 0 and 1 can be optimized if the result can overflow.
// For other constants, it can be optimized only without overflow.
if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
@@ -1533,7 +1556,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
right_op = UseRegister(right);
}
LMulI* mul = new(zone()) LMulI(left_op, right_op);
- if (can_overflow || bailout_on_minus_zero) {
+ if (right_op->IsConstantOperand()
+ ? ((can_overflow && constant_value == -1) ||
+ (bailout_on_minus_zero && constant_value <= 0))
+ : (can_overflow || bailout_on_minus_zero)) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 1920935e61..fc8b300f35 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -1360,6 +1360,7 @@ class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
DECLARE_HYDROGEN_ACCESSOR(Constant)
double value() const { return hydrogen()->DoubleValue(); }
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
};
@@ -1938,20 +1939,26 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
int arity() const { return hydrogen()->argument_count() - 1; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2224,7 +2231,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2287,7 +2294,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 9d9591b69f..5b6ed2caf3 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -61,7 +61,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
@@ -120,8 +119,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -338,7 +336,7 @@ bool LCodeGen::GenerateJumpTable() {
DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
@@ -842,7 +840,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -895,21 +893,22 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ stop("trap_on_deopt", condition);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -919,11 +918,11 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, detail, bailout_type);
+ DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
}
@@ -945,6 +944,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1158,7 +1158,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ b(&done);
}
@@ -1176,7 +1176,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1191,7 +1191,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1211,7 +1211,7 @@ void LCodeGen::DoModI(LModI* instr) {
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
@@ -1222,7 +1222,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
@@ -1243,7 +1243,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
@@ -1268,7 +1268,7 @@ void LCodeGen::DoModI(LModI* instr) {
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
__ Move(result_reg, left_reg);
@@ -1298,7 +1298,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -1316,19 +1316,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1356,7 +1356,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1364,7 +1364,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1374,7 +1374,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1389,7 +1389,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1401,7 +1401,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
__ bind(&positive);
}
@@ -1413,7 +1413,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1436,7 +1436,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1487,13 +1487,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
return;
}
@@ -1516,7 +1516,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1524,7 +1524,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1565,7 +1565,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1577,7 +1577,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
__ bind(&positive);
}
@@ -1589,7 +1589,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1635,14 +1635,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ rsb(result, left, Operand::Zero());
}
@@ -1652,7 +1652,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ mov(result, Operand::Zero());
break;
@@ -1702,7 +1702,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1718,7 +1718,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -1781,7 +1781,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr, "negative value");
+ DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
@@ -1818,7 +1818,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, "negative value");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
}
__ Move(result, left);
}
@@ -1833,7 +1833,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
@@ -1865,7 +1865,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -1886,7 +1886,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -1904,6 +1904,20 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
DCHECK(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
+#if V8_HOST_ARCH_IA32
+ // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
+ // builds.
+ uint64_t bits = instr->bits();
+ if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
+ V8_UINT64_C(0x7FF0000000000000)) {
+ uint32_t lo = static_cast<uint32_t>(bits);
+ uint32_t hi = static_cast<uint32_t>(bits >> 32);
+ __ mov(ip, Operand(lo));
+ __ mov(scratch0(), Operand(hi));
+ __ vmov(result, ip, scratch0());
+ return;
+ }
+#endif
double v = instr->value();
__ Vmov(result, v, scratch0());
}
@@ -1940,9 +1954,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ SmiTst(object);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, "not a date object");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2059,7 +2073,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -2171,8 +2185,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2285,7 +2298,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
}
const Register map = scratch0();
@@ -2341,7 +2354,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, "unexpected object");
+ DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -2987,7 +3000,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
@@ -3042,7 +3055,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
// Store the value.
@@ -3059,7 +3072,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
@@ -3080,7 +3093,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
} else {
__ b(ne, &skip_assignment);
}
@@ -3161,7 +3174,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3287,7 +3300,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr, "negative value");
+ DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3340,7 +3353,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
@@ -3374,11 +3387,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr, "not a Smi");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
}
@@ -3520,9 +3533,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr, "not a JavaScript object");
+ DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3557,7 +3570,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr, "too many arguments");
+ DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3636,24 +3649,20 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- R1State r1_state) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
+ Register function_reg = r1;
+
LPointerMap* pointers = instr->pointer_map();
if (can_invoke_directly) {
- if (r1_state == R1_UNINITIALIZED) {
- __ Move(r1, function);
- }
-
// Change context.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
@@ -3662,7 +3671,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
__ Call(ip);
// Set up deoptimization.
@@ -3671,7 +3680,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
@@ -3687,7 +3696,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@@ -3755,7 +3764,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
@@ -3802,7 +3811,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr, "lost precision or NaN");
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3810,7 +3819,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -3836,7 +3845,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
@@ -3850,7 +3859,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr, "lost precision or NaN");
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
__ bind(&done);
}
@@ -3914,7 +3923,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3973,9 +3982,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- R1_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
@@ -4101,8 +4108,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(r3));
+ DCHECK(vector_register.is(r2));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ Move(vector_register, vector);
+ __ mov(slot_register, Operand(Smi::FromInt(index)));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4280,7 +4309,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4303,7 +4332,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds");
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -4502,7 +4531,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4551,7 +4580,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, "memento found");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4886,12 +4915,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ SmiTag(output, input);
}
@@ -4905,7 +4934,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr, "not a Smi");
+ DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
} else {
__ SmiUntag(result, input);
}
@@ -4933,7 +4962,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -4943,7 +4972,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
@@ -4951,7 +4980,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
@@ -5019,22 +5048,22 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ mov(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero");
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
}
}
__ bind(&done);
@@ -5103,14 +5132,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero");
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -5128,26 +5157,26 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero");
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr, "not a Smi");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
}
@@ -5155,7 +5184,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
}
}
@@ -5176,13 +5205,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
} else {
- DeoptimizeIf(lo, instr, "wrong instance type");
+ DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr, "wrong instance type");
+ DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
}
}
} else {
@@ -5193,11 +5222,11 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type");
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -5216,7 +5245,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, "value mismatch");
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}
@@ -5231,7 +5260,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr, "instance migration failed");
+ DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
}
@@ -5289,7 +5318,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
__ bind(&success);
@@ -5328,7 +5357,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
@@ -5533,7 +5562,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -5796,19 +5825,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr, "undefined");
+ DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr, "null");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNull);
__ SmiTst(r0);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, "wrong instance type");
+ DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -5824,7 +5853,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
__ bind(&use_cache);
}
@@ -5846,7 +5875,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, "no cache");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
__ bind(&done);
}
@@ -5857,7 +5886,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 65cc213453..713e6954b6 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -144,7 +144,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
@@ -216,18 +216,11 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
- enum R1State {
- R1_UNINITIALIZED,
- R1_CONTAINS_TARGET
- };
-
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- R1State r1_state);
+ int formal_parameter_count, int arity,
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -235,9 +228,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, Deoptimizer::BailoutType bailout_type);
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail);
+ Deoptimizer::DeoptReason deopt_reason);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 0aa886bac7..c8fb60dafa 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1118,9 +1118,9 @@ int MacroAssembler::ActivationFrameAlignment() {
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count,
- bool restore_context) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context,
+ bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
// Optionally restore all double registers.
@@ -1154,7 +1154,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
if (argument_count.is_valid()) {
- add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
+ if (argument_count_is_length) {
+ add(sp, sp, argument_count);
+ } else {
+ add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
+ }
}
}
@@ -1704,7 +1708,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
b(ne, miss);
@@ -2275,10 +2279,15 @@ void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
}
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
JumpIfSmi(value, miss);
}
@@ -2357,139 +2366,6 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
}
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- DCHECK(function_address.is(r1) || function_address.is(r2));
-
- Label profiler_disabled;
- Label end_profiler_check;
- mov(r9, Operand(ExternalReference::is_profiling_address(isolate())));
- ldrb(r9, MemOperand(r9, 0));
- cmp(r9, Operand(0));
- b(eq, &profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- mov(r3, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- Move(r3, function_address);
- bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- mov(r9, Operand(next_address));
- ldr(r4, MemOperand(r9, kNextOffset));
- ldr(r5, MemOperand(r9, kLimitOffset));
- ldr(r6, MemOperand(r9, kLevelOffset));
- add(r6, r6, Operand(1));
- str(r6, MemOperand(r9, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, r0);
- mov(r0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(this, r3);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, r0);
- mov(r0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // load value from ReturnValue
- ldr(r0, return_value_operand);
- bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- str(r4, MemOperand(r9, kNextOffset));
- if (emit_debug_code()) {
- ldr(r1, MemOperand(r9, kLevelOffset));
- cmp(r1, r6);
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
- }
- sub(r6, r6, Operand(1));
- str(r6, MemOperand(r9, kLevelOffset));
- ldr(ip, MemOperand(r9, kLimitOffset));
- cmp(r5, ip);
- b(ne, &delete_allocated_handles);
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
- ldr(r5, MemOperand(ip));
- cmp(r4, r5);
- b(ne, &promote_scheduled_exception);
- bind(&exception_handled);
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- ldr(cp, *context_restore_operand);
- }
- // LeaveExitFrame expects unwind space to be in a register.
- mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4, !restore_context);
- mov(pc, lr);
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- str(r5, MemOperand(r9, kLimitOffset));
- mov(r4, r0);
- PrepareCallCFunction(1, r5);
- mov(r0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(
- ExternalReference::delete_handle_scope_extensions(isolate()), 1);
- mov(r0, r4);
- jmp(&leave_exit_frame);
-}
-
-
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -3879,6 +3755,19 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ ldr(dst,
+ FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+ : AccessorPair::kSetterOffset;
+ ldr(dst, FieldMemOperand(dst, offset));
+}
+
+
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Register empty_fixed_array_value = r6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index d83b64b80f..5de013e270 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -555,9 +555,9 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles,
- Register argument_count,
- bool restore_context);
+ void LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context,
+ bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -924,6 +924,8 @@ class MacroAssembler: public Assembler {
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
@@ -944,7 +946,7 @@ class MacroAssembler: public Assembler {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
tst(type, Operand(kIsNotStringMask), cond);
- DCHECK_EQ(0, kStringTag);
+ DCHECK_EQ(0u, kStringTag);
return eq;
}
@@ -1114,16 +1116,6 @@ class MacroAssembler: public Assembler {
void MovFromFloatParameter(DwVfpRegister dst);
void MovFromFloatResult(DwVfpRegister dst);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1367,6 +1359,8 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register dst, Register src) {
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index f4918febb5..c0c677c1b1 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -96,12 +96,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 7414e54a65..078d0dfa62 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -16,7 +16,8 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerARM();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index e34c3116e6..209b5d2ae8 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1635,13 +1635,10 @@ void Simulator::HandleVList(Instruction* instr) {
ReadW(reinterpret_cast<int32_t>(address), instr),
ReadW(reinterpret_cast<int32_t>(address + 1), instr)
};
- double d;
- memcpy(&d, data, 8);
- set_d_register_from_double(reg, d);
+ set_d_register(reg, reinterpret_cast<uint32_t*>(data));
} else {
- int32_t data[2];
- double d = get_double_from_d_register(reg);
- memcpy(data, &d, 8);
+ uint32_t data[2];
+ get_d_register(reg, data);
WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
@@ -1917,8 +1914,13 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
double Simulator::canonicalizeNaN(double value) {
- return (FPSCR_default_NaN_mode_ && std::isnan(value)) ?
- FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
+ // Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
+ // choices" of the ARM Reference Manual.
+ const uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
+ if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
+ value = bit_cast<double>(kDefaultNaN);
+ }
+ return value;
}
@@ -3031,7 +3033,9 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
int m = instr->VFPMRegValue(kDoublePrecision);
int d = instr->VFPDRegValue(kDoublePrecision);
- set_d_register_from_double(d, get_double_from_d_register(m));
+ uint32_t data[2];
+ get_d_register(m, data);
+ set_d_register(d, data);
} else {
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
@@ -3069,7 +3073,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
- double dd_value = std::sqrt(dm_value);
+ double dd_value = fast_sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if (instr->Opc3Value() == 0x0) {
@@ -3167,12 +3171,10 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
(instr->Bit(23) == 0x0)) {
// vmov (ARM core register to scalar)
int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
- double dd_value = get_double_from_d_register(vd);
- int32_t data[2];
- memcpy(data, &dd_value, 8);
+ uint32_t data[2];
+ get_d_register(vd, data);
data[instr->Bit(21)] = get_register(instr->RtValue());
- memcpy(&dd_value, data, 8);
- set_d_register_from_double(vd, dd_value);
+ set_d_register(vd, data);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x1) &&
(instr->Bit(23) == 0x0)) {
@@ -3529,16 +3531,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int rn = instr->RnValue();
int vm = instr->VFPMRegValue(kDoublePrecision);
if (instr->HasL()) {
- int32_t data[2];
- double d = get_double_from_d_register(vm);
- memcpy(data, &d, 8);
+ uint32_t data[2];
+ get_d_register(vm, data);
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
int32_t data[] = { get_register(rt), get_register(rn) };
- double d;
- memcpy(&d, data, 8);
- set_d_register_from_double(vm, d);
+ set_d_register(vm, reinterpret_cast<uint32_t*>(data));
}
}
break;
@@ -3559,14 +3558,11 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
ReadW(address, instr),
ReadW(address + 4, instr)
};
- double val;
- memcpy(&val, data, 8);
- set_d_register_from_double(vd, val);
+ set_d_register(vd, reinterpret_cast<uint32_t*>(data));
} else {
// Store double to memory: vstr.
- int32_t data[2];
- double val = get_double_from_d_register(vd);
- memcpy(data, &val, 8);
+ uint32_t data[2];
+ get_d_register(vd, data);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 770d425af2..bba78c89e4 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -311,8 +311,8 @@ void ConstPool::RecordEntry(intptr_t data,
mode != RelocInfo::STATEMENT_POSITION &&
mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
- mode != RelocInfo::CODE_AGE_SEQUENCE);
-
+ mode != RelocInfo::CODE_AGE_SEQUENCE &&
+ mode != RelocInfo::DEOPT_REASON);
uint64_t raw_data = static_cast<uint64_t>(data);
int offset = assm_->pc_offset();
if (IsEmpty()) {
@@ -590,6 +590,7 @@ void Assembler::Reset() {
void Assembler::GetCode(CodeDesc* desc) {
+ reloc_info_writer.Finish();
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
@@ -2848,11 +2849,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (((rmode >= RelocInfo::JS_RETURN) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
(rmode == RelocInfo::CONST_POOL) ||
- (rmode == RelocInfo::VENEER_POOL)) {
+ (rmode == RelocInfo::VENEER_POOL) ||
+ (rmode == RelocInfo::DEOPT_REASON)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
+ || RelocInfo::IsDeoptReason(rmode)
|| RelocInfo::IsPosition(rmode)
|| RelocInfo::IsConstPool(rmode)
|| RelocInfo::IsVeneerPool(rmode));
@@ -3069,33 +3072,11 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
}
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
int Assembler::buffer_space() const {
return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
}
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 53496f37ce..996898553c 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -1007,6 +1007,11 @@ class Assembler : public AssemblerBase {
// Debugging ----------------------------------------------------------------
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
void RecordComment(const char* msg);
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
+
int buffer_space() const;
// Mark address of the ExitJSFrame code.
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 9f140c2f7d..89e304051a 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -126,6 +126,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ Mov(x3, x1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -301,6 +302,33 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ Peek(x4, 2 * kXRegSize);
+ __ Push(x4);
+ __ Push(x1); // Argument for Runtime_NewObject.
+ __ Push(original_constructor);
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ __ Mov(x4, x0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ __ jmp(count_incremented);
+ } else {
+ __ Push(x1); // Argument for Runtime_NewObject.
+ __ Push(original_constructor);
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Mov(x4, x0);
+ __ jmp(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -308,6 +336,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- x0 : number of arguments
// -- x1 : constructor function
// -- x2 : allocation site or undefined
+ // -- x3 : original constructor
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -330,15 +359,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register argc = x0;
Register constructor = x1;
+ Register original_constructor = x3;
// x1: constructor function
__ SmiTag(argc);
__ Push(argc, constructor);
// sp[0] : Constructor function.
// sp[1]: number of arguments (smi-tagged)
+ Label rt_call, count_incremented, allocated, normal_new;
+ __ Cmp(constructor, original_constructor);
+ __ B(eq, &normal_new);
+ Generate_Runtime_NewObject(masm, create_memento, original_constructor,
+ &count_incremented, &allocated);
+
+ __ Bind(&normal_new);
+
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
@@ -535,23 +572,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
__ Bind(&rt_call);
- Label count_incremented;
- if (create_memento) {
- // Get the cell or allocation site.
- __ Peek(x4, 2 * kXRegSize);
- __ Push(x4);
- __ Push(constructor); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- __ Mov(x4, x0);
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- __ jmp(&count_incremented);
- } else {
- __ Push(constructor); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
- __ Mov(x4, x0);
- }
+ Generate_Runtime_NewObject(masm, create_memento, constructor,
+ &count_incremented, &allocated);
// Receiver for constructor call allocated.
// x4: JSObject
@@ -685,6 +707,98 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- x2 : allocation site or undefined
+ // -- x3 : original constructor
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_JSConstructStubForDerived");
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ __ Mov(x4, x0);
+ __ SmiTag(x4);
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Push(x4, x3, x10);
+ // sp[0]: number of arguments
+ // sp[1]: new.target
+ // sp[2]: receiver (the hole)
+
+
+ // Set up pointer to last argument.
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+ // Copy arguments and receiver to the expression stack.
+ // Copy 2 values every loop to use ldp/stp.
+ // x0: number of arguments
+ // x1: constructor function
+ // x2: address of last argument (caller sp)
+ // jssp[0]: receiver
+ // jssp[1]: new.target
+ // jssp[2]: number of arguments (smi-tagged)
+ // Compute the start address of the copy in x4.
+ __ Add(x4, x2, Operand(x0, LSL, kPointerSizeLog2));
+ Label loop, entry, done_copying_arguments;
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
+ __ Push(x11, x10);
+ __ Bind(&entry);
+ __ Cmp(x4, x2);
+ __ B(gt, &loop);
+ // Because we copied values 2 by 2 we may have copied one extra value.
+ // Drop it if that is the case.
+ __ B(eq, &done_copying_arguments);
+ __ Drop(1);
+ __ Bind(&done_copying_arguments);
+
+ __ Add(x0, x0, Operand(1)); // new.target
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ Mov(x2, Operand(debug_step_in_fp));
+ __ Ldr(x2, MemOperand(x2));
+ __ Cbz(x2, &skip_step_in);
+
+ __ Push(x0, x1, x1);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ Pop(x1, x0);
+
+ __ bind(&skip_step_in);
+
+ // Call the function.
+ // x0: number of arguments
+ // x1: constructor function
+ ParameterCount actual(x0);
+ __ InvokeFunction(x1, actual, CALL_FUNCTION, NullCallWrapper());
+
+
+ // Restore the context from the frame.
+ // x0: result
+ // jssp[0]: number of arguments (smi-tagged)
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // Load number of arguments (smi).
+ __ Peek(x1, 0);
+
+ // Leave construct frame
+ }
+
+ __ DropBySMI(x1);
+ __ Drop(1);
+ __ Ret();
+}
+
+
// Input:
// x0: code entry.
// x1: function.
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index e773b531a1..c50a30a042 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -973,6 +973,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
@@ -1642,6 +1643,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
Register key = ArgumentsAccessReadDescriptor::index();
DCHECK(arg_count.is(x0));
@@ -1698,6 +1700,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// jssp[8]: address of receiver argument
// jssp[16]: function
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Register caller_fp = x10;
@@ -1729,6 +1733,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
//
// Returns pointer to result object in x0.
+ CHECK(!has_new_target());
+
// Note: arg_count_smi is an alias of param_count_smi.
Register arg_count_smi = x3;
Register param_count_smi = x3;
@@ -2055,6 +2061,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
MemOperand(caller_fp,
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi);
+ if (has_new_target()) {
+ // Skip new.target: it is not a part of arguments.
+ __ Sub(param_count, param_count, Operand(1));
+ __ SmiTag(param_count_smi, param_count);
+ }
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
@@ -2149,6 +2160,53 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: index of rest parameter (tagged)
+ // jssp[8]: number of parameters (tagged)
+ // jssp[16]: address of receiver argument
+ //
+ // Returns pointer to result object in x0.
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register rest_index_smi = x1;
+ Register param_count_smi = x2;
+ Register params = x3;
+ Register param_count = x13;
+ __ Pop(rest_index_smi, param_count_smi, params);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &runtime);
+
+ // x1 rest_index_smi index of rest parameter
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ __ Bind(&runtime);
+ __ Push(params, param_count_smi, rest_index_smi);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
@@ -2938,6 +2996,14 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(x2, x5);
}
+ if (IsSuperConstructorCall()) {
+ __ Mov(x4, Operand(1 * kPointerSize));
+ __ Add(x4, x4, Operand(x0, LSL, kPointerSizeLog2));
+ __ Peek(x3, x4);
+ } else {
+ __ Mov(x3, function);
+ }
+
// Jump to the function-specific construct stub.
Register jump_reg = x4;
Register shared_func_info = jump_reg;
@@ -2981,14 +3047,13 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// x1 - function
// x3 - slot id
+ // x2 - vector
Label miss;
Register function = x1;
Register feedback_vector = x2;
Register index = x3;
Register scratch = x4;
- EmitLoadTypeFeedbackVector(masm, feedback_vector);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
__ Cmp(function, scratch);
__ B(ne, &miss);
@@ -3006,6 +3071,9 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
Register allocation_site = feedback_vector;
__ Mov(allocation_site, scratch);
+
+ Register original_constructor = x3;
+ __ Mov(original_constructor, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -3027,6 +3095,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// x1 - function
// x3 - slot id (Smi)
+ // x2 - vector
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
@@ -3042,16 +3111,33 @@ void CallICStub::Generate(MacroAssembler* masm) {
Register index = x3;
Register type = x4;
- EmitLoadTypeFeedbackVector(masm, feedback_vector);
-
// The checks. First, does x1 match the recorded monomorphic target?
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
- __ Cmp(x4, function);
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
+ __ Cmp(x5, function);
__ B(ne, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(function, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -3127,20 +3213,18 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
- // Store the function.
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Str(function, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(function, MemOperand(x4, 0));
+ // Store the function. Use a stub since we need a frame for allocation.
+ // x2 - vector
+ // x3 - slot
+ // x1 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(function);
+ __ CallStub(&create_stub);
+ __ Pop(function);
+ }
- // Update the write barrier.
- __ Mov(x5, function);
- __ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
@@ -3163,26 +3247,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("CallICStub[Miss]");
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ Peek(x4, (arg_count() + 1) * kPointerSize);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
- __ Push(x4, x1, x2, x3);
+ // Push the receiver and the function and feedback info.
+ __ Push(x1, x2, x3);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to edi and exit the internal frame.
- __ Mov(x1, x0);
- }
+ // Move result to edi and exit the internal frame.
+ __ Mov(x1, x0);
}
@@ -3574,6 +3652,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
ASM_LOCATION("CompareICStub[KnownObjects]");
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
Register result = x0;
Register rhs = x0;
@@ -3583,11 +3662,13 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Register rhs_map = x10;
Register lhs_map = x11;
+ Register map = x12;
+ __ GetWeakValue(map, cell);
__ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
__ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ Cmp(rhs_map, Operand(known_map_));
+ __ Cmp(rhs_map, map);
__ B(ne, &miss);
- __ Cmp(lhs_map, Operand(known_map_));
+ __ Cmp(lhs_map, map);
__ B(ne, &miss);
__ Sub(result, rhs, lhs);
@@ -4372,6 +4453,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, x2);
+ CallICStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, x2);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -4911,14 +5006,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("ArrayConstructorStub::Generate");
// ----------- S t a t e -------------
- // -- x0 : argc (only if argument_count() == ANY)
+ // -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
- // -- sp[0] : return address
- // -- sp[4] : last argument
+ // -- x3 : original constructor
+ // -- sp[0] : last argument
// -----------------------------------
Register constructor = x1;
Register allocation_site = x2;
+ Register original_constructor = x3;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
@@ -4940,6 +5036,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
}
+ Label subclassing;
+ __ Cmp(original_constructor, constructor);
+ __ B(ne, &subclassing);
+
Register kind = x3;
Label no_info;
// Get the elements kind and case on that.
@@ -4953,6 +5053,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ // Subclassing support.
+ __ Bind(&subclassing);
+ __ Push(constructor, original_constructor);
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ add(x0, x0, Operand(2));
+ break;
+ case NONE:
+ __ Mov(x0, Operand(2));
+ break;
+ case ONE:
+ __ Mov(x0, Operand(3));
+ break;
+ }
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
@@ -5043,12 +5162,185 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+// The number of register that CallApiFunctionAndReturn will need to save on
+// the stack. The space for these registers need to be allocated in the
+// ExitFrame before calling CallApiFunctionAndReturn.
+static const int kCallApiFunctionSpillSpace = 4;
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions.
+// 'stack_space' is the space to be unwound on exit (includes the call JS
+// arguments space and the additional space allocated for the fast call).
+// 'spill_offset' is the offset from the stack pointer where
+// CallApiFunctionAndReturn can spill registers.
+static void CallApiFunctionAndReturn(
+ MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand, int spill_offset,
+ MemOperand return_value_operand, MemOperand* context_restore_operand) {
+ ASM_LOCATION("CallApiFunctionAndReturn");
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address.is(x1) || function_address.is(x2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ Mov(x10, ExternalReference::is_profiling_address(isolate));
+ __ Ldrb(w10, MemOperand(x10));
+ __ Cbz(w10, &profiler_disabled);
+ __ Mov(x3, thunk_ref);
+ __ B(&end_profiler_check);
+
+ __ Bind(&profiler_disabled);
+ __ Mov(x3, function_address);
+ __ Bind(&end_profiler_check);
+
+ // Save the callee-save registers we are going to use.
+ // TODO(all): Is this necessary? ARM doesn't do it.
+ STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+ __ Poke(x19, (spill_offset + 0) * kXRegSize);
+ __ Poke(x20, (spill_offset + 1) * kXRegSize);
+ __ Poke(x21, (spill_offset + 2) * kXRegSize);
+ __ Poke(x22, (spill_offset + 3) * kXRegSize);
+
+ // Allocate HandleScope in callee-save registers.
+ // We will need to restore the HandleScope after the call to the API function,
+ // by allocating it in callee-save registers they will be preserved by C code.
+ Register handle_scope_base = x22;
+ Register next_address_reg = x19;
+ Register limit_reg = x20;
+ Register level_reg = w21;
+
+ __ Mov(handle_scope_base, next_address);
+ __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ __ Add(level_reg, level_reg, 1);
+ __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, x3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ldr(x0, return_value_operand);
+ __ Bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+ __ Cmp(w1, level_reg);
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ Sub(level_reg, level_reg, 1);
+ __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+ __ Cmp(limit_reg, x1);
+ __ B(ne, &delete_allocated_handles);
+
+ __ Bind(&leave_exit_frame);
+ // Restore callee-saved registers.
+ __ Peek(x19, (spill_offset + 0) * kXRegSize);
+ __ Peek(x20, (spill_offset + 1) * kXRegSize);
+ __ Peek(x21, (spill_offset + 2) * kXRegSize);
+ __ Peek(x22, (spill_offset + 3) * kXRegSize);
+
+ // Check if the function scheduled an exception.
+ __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
+ __ Ldr(x5, MemOperand(x5));
+ __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
+ &promote_scheduled_exception);
+ __ Bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ Ldr(cp, *context_restore_operand);
+ }
+
+ if (stack_space_operand != NULL) {
+ __ Ldr(w2, *stack_space_operand);
+ }
+
+ __ LeaveExitFrame(false, x1, !restore_context);
+ if (stack_space_operand != NULL) {
+ __ Drop(x2, 1);
+ } else {
+ __ Drop(stack_space);
+ }
+ __ Ret();
+
+ __ Bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
+ }
+ __ B(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ Bind(&delete_allocated_handles);
+ __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ // Save the return value in a callee-save register.
+ Register saved_result = x19;
+ __ Mov(saved_result, x0);
+ __ Mov(x0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ Mov(x0, saved_result);
+ __ B(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
// -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
+ // -- x3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -5063,10 +5355,6 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = x1;
Register context = cp;
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
-
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
@@ -5078,6 +5366,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || x3.is(argc.reg()));
+
// FunctionCallbackArguments: context, callee and call data.
__ Push(context, callee, call_data);
@@ -5088,7 +5378,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
}
Register isolate_reg = x5;
- __ Mov(isolate_reg, ExternalReference::isolate_address(isolate()));
+ __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
// FunctionCallbackArguments:
// return value, return value default, isolate, holder.
@@ -5113,37 +5403,70 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
- // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
- __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc and
- // FunctionCallbackInfo::is_construct_call = 0
- __ Mov(x10, argc);
- __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
-
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ if (argc.is_immediate()) {
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args,
+ Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Mov(x10, argc.immediate());
+ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
+ } else {
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
+ __ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call
+ __ Add(x10, argc.reg(), FCA::kArgsLength + 1);
+ __ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
+ __ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
+ }
+
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ MemOperand is_construct_call_operand =
+ MemOperand(masm->StackPointer(), 4 * kPointerSize);
+ MemOperand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+ }
const int spill_offset = 1 + kApiStackSpace;
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- spill_offset,
- return_value_operand,
- &context_restore_operand);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_operand, spill_offset,
+ return_value_operand, &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
+ call_data_undefined);
+}
+
+
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -5181,12 +5504,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_accessor_getter_callback(isolate());
const int spill_offset = 1 + kApiStackSpace;
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- spill_offset,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, NULL, spill_offset,
+ MemOperand(fp, 6 * kPointerSize), NULL);
}
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index b83bbbe121..8c4b776efe 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -132,6 +132,9 @@ void Deoptimizer::EntryGenerator::Generate() {
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
+ __ Mov(x3, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ Str(fp, MemOperand(x3));
+
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) +
(saved_fp_registers.Count() * kDRegSize);
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
index 0d3d34b695..88b31a4da5 100644
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -124,7 +124,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@@ -155,7 +155,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -197,7 +197,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
- __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
+ __ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
@@ -241,6 +241,25 @@ void FullCodeGenerator::Generate() {
}
}
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Add(x3, fp, StandardFrameConstants::kCallerSPOffset + offset);
+ __ Mov(x2, Smi::FromInt(num_parameters));
+ __ Mov(x1, Smi::FromInt(rest_index));
+ __ Push(x3, x2, x1);
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, x0, x1, x2);
+ }
+
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -262,15 +281,19 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, x0, x1, x2);
@@ -455,7 +478,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
__ add(current_sp, current_sp, ip0);
__ ret();
- __ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ __ dc64(kXRegSize * arg_count);
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -927,15 +954,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(x1, scope_->ContextChainLength(scope_->ScriptScope()));
- __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
+ __ Ldr(x1, ContextMemOperand(x1, descriptor->Index()));
__ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
// Assign it.
@@ -1247,6 +1275,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1287,7 +1316,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ Mov(x2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1517,6 +1546,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1667,11 +1701,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ Push(x0); // Save result on stack
@@ -1714,7 +1750,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
- __ Mov(x0, Smi::FromInt(SLOPPY)); // Strict mode
+ __ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1723,21 +1759,22 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- if (property->emit_store()) {
- // Duplicate receiver on stack.
- __ Peek(x0, 0);
- __ Push(x0);
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- VisitForEffect(value);
- }
+ DCHECK(property->emit_store());
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(value);
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1759,6 +1796,69 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ Push(x0); // Save result on stack
+ result_saved = true;
+ }
+
+ __ Peek(x10, 0); // Duplicate receiver.
+ __ Push(x10);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ Mov(x0, Smi::FromInt(NONE));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ Mov(x0, Smi::FromInt(NONE));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ Mov(x0, Smi::FromInt(NONE));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ Peek(x0, 0);
@@ -1810,6 +1910,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1946,19 +2047,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ Push(x0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
- mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -2048,7 +2145,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left_expr,
Expression* right_expr) {
Label done, both_smis, stub_call;
@@ -2066,7 +2162,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(code, expr->BinaryOperationFeedbackId());
@@ -2146,11 +2242,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ Pop(x1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
@@ -2175,9 +2269,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ Peek(scratch, kPointerSize); // constructor
@@ -2185,24 +2277,29 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ Peek(scratch, 0); // prototype
}
__ Push(scratch);
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ Mov(x0, Smi::FromInt(DONT_ENUM));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ Mov(x0, Smi::FromInt(DONT_ENUM));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
default:
@@ -2287,7 +2384,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::ValueRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2354,7 +2451,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
if (var->IsLookupSlot()) {
// Assignment to var.
__ Mov(x11, Operand(var->name()));
- __ Mov(x10, Smi::FromInt(strict_mode()));
+ __ Mov(x10, Smi::FromInt(language_mode()));
// jssp[0] : mode.
// jssp[8] : name.
// jssp[16] : context.
@@ -2373,7 +2470,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
@@ -2408,8 +2505,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(x0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2421,9 +2518,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(x0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2437,7 +2535,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(x0));
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2460,8 +2559,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(x0);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2476,8 +2573,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(x0);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(x0);
}
@@ -2633,9 +2731,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2664,7 +2761,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
// Prepare to push the language mode.
- __ Mov(x12, Smi::FromInt(strict_mode()));
+ __ Mov(x12, Smi::FromInt(language_mode()));
// Prepare to push the start position of the scope the calls resides in.
__ Mov(x13, Smi::FromInt(scope()->start_position()));
@@ -2676,8 +2773,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(x0);
__ CallRuntime(Runtime::kGetPrototype, 1);
@@ -2801,11 +2897,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ Push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -2834,12 +2926,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ Push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2873,6 +2961,66 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ if (!ValidateSuperCall(expr)) return;
+
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into x1 and x0.
+ __ Mov(x0, arg_count);
+ __ Peek(x1, arg_count * kXRegSize);
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
+
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ RecordJSReturnSite(expr);
+
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(x1, this_var);
+ Label uninitialized_this;
+ __ JumpIfRoot(x1, Heap::kTheHoleValueRootIndex, &uninitialized_this);
+ __ Mov(x0, Operand(this_var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3416,7 +3564,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3746,6 +3894,60 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ Push(result_register());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ Ldr(x11, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x12, MemOperand(x11, StandardFrameConstants::kContextOffset));
+ __ Cmp(x12, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &adaptor_frame);
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ Mov(x0, Operand(0));
+ __ B(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ Ldr(x1, MemOperand(x11, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(x1, x1);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ Sub(x1, x1, Operand(1));
+ __ Mov(x0, x1);
+
+ // Get arguments pointer in x11.
+ __ Add(x11, x11, Operand(x1, LSL, kPointerSizeLog2));
+ __ Add(x11, x11, StandardFrameConstants::kCallerSPOffset);
+ Label loop;
+ __ bind(&loop);
+ // Pre-decrement x11 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Ldr(x10, MemOperand(x11, -kPointerSize, PreIndex));
+ __ Push(x10);
+ __ Sub(x1, x1, Operand(1));
+ __ Cbnz(x1, &loop);
+ }
+
+ __ bind(&args_set_up);
+ __ Peek(x1, Operand(x0, LSL, kPointerSizeLog2));
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(result_register());
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
@@ -3762,7 +3964,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4140,7 +4342,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Mov(x10, Smi::FromInt(strict_mode()));
+ __ Mov(x10, Smi::FromInt(language_mode()));
__ Push(x10);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(x0);
@@ -4148,7 +4350,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ Ldr(x12, GlobalObjectMemOperand());
__ Mov(x11, Operand(var->name()));
@@ -4360,6 +4562,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4396,8 +4599,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{
Assembler::BlockPoolsScope scope(masm_);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
}
@@ -4466,7 +4668,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::NameRegister());
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 57eebcc3b5..6deeabfcf5 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -124,7 +124,23 @@ void CreateAllocationSiteDescriptor::Initialize(
// x2: feedback vector
// x3: call feedback slot
Register registers[] = {cp, x2, x3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: feedback vector
+ // x3: call feedback slot
+ // x1: tagged value to put in the weak cell
+ Register registers[] = {cp, x2, x3, x1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -152,6 +168,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, x1, x3, x2};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// x0 : number of arguments
// x1 : the function to call
@@ -362,6 +388,31 @@ void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
x4, // call_data
x2, // holder
x1, // api_function_address
+ x3, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations,
+ &default_descriptor);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ static PlatformInterfaceDescriptor default_descriptor =
+ PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ Register registers[] = {
+ cp, // context
+ x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index 2d5f7f297c..0234fcddca 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -142,6 +142,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -1053,7 +1067,15 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), x1);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(x3);
+ vector = FixedTemp(x2);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, x0), instr);
}
@@ -1963,7 +1985,9 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
: UseRegisterAtStart(least_const);
LInstruction* result =
DefineAsRegister(new(zone()) LMulConstIS(left, right));
- if ((bailout_on_minus_zero && constant <= 0) || can_overflow) {
+ if ((bailout_on_minus_zero && constant <= 0) ||
+ (can_overflow && constant != 1 &&
+ base::bits::IsPowerOfTwo32(constant_abs))) {
result = AssignEnvironment(result);
}
return result;
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 424ecba12d..8b48729302 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -847,20 +847,26 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
int arity() const { return hydrogen()->argument_count() - 1; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2576,7 +2582,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2624,7 +2630,7 @@ class LStoreNamedGeneric FINAL: public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index df9e7b5ae5..ef01c91d47 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -410,8 +410,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(x3));
+ DCHECK(vector_register.is(x2));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ Mov(vector_register, vector);
+ __ Mov(slot_register, Operand(Smi::FromInt(index)));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
after_push_argument_ = false;
}
@@ -640,8 +662,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
@@ -835,7 +856,7 @@ bool LCodeGen::GenerateJumpTable() {
__ Bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load the base
@@ -918,7 +939,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
@@ -942,6 +962,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -984,8 +1005,9 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, const char* detail, BranchType branch_type,
- Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
+ BranchType branch_type, Register reg, int bit,
+ Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
@@ -1035,21 +1057,22 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Deoptimizer::JumpTableEntry* table_entry =
- new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
- !frame_is_built_);
+ new (zone()) Deoptimizer::JumpTableEntry(
+ entry, deopt_info, bailout_type, !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry->IsEquivalentTo(*jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -1058,85 +1081,89 @@ void LCodeGen::DeoptimizeBranch(
}
-void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
+void LCodeGen::Deoptimize(LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
+ DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
+ override_bailout_type);
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_zero, rt);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_not_zero, rt);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
+ DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- const char* detail) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail) {
+ LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, instr, detail);
+ DeoptimizeIf(eq, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail) {
+ LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, instr, detail);
+ DeoptimizeIf(ne, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, instr, detail);
+ DeoptimizeIf(vs, instr, deopt_reason);
}
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, "not heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
}
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- const char* detail) {
- DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
+ Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}
@@ -1510,7 +1537,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1524,7 +1551,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1650,7 +1677,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr, "too many arguments");
+ DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1777,8 +1804,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -1832,7 +1858,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr, "out of bounds");
+ DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -1911,7 +1937,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr, "Smi");
+ DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
}
Register map = NoReg;
@@ -1972,7 +1998,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr, "unexpected object");
+ Deoptimize(instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -1980,27 +2006,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- Register function_reg) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
// The function interface relies on the following register assignments.
- DCHECK(function_reg.Is(x1) || function_reg.IsNone());
+ Register function_reg = x1;
Register arity_reg = x0;
LPointerMap* pointers = instr->pointer_map();
- // If necessary, load the function object.
- if (function_reg.IsNone()) {
- function_reg = x1;
- __ LoadObject(function_reg, function);
- }
-
if (FLAG_debug_code) {
Label is_not_smi;
// Try to confirm that function_reg (x1) is a tagged pointer.
@@ -2186,7 +2204,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr, "instance migration failed");
+ DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
}
@@ -2241,7 +2259,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
__ Bind(&success);
@@ -2250,7 +2268,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
}
}
@@ -2258,7 +2276,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr, "not a Smi");
+ DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
}
@@ -2276,15 +2294,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr, "wrong instance type");
+ DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr, "wrong instance type");
+ DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
}
} else {
uint8_t mask;
@@ -2295,10 +2313,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
- "wrong instance type");
+ Deoptimizer::kWrongInstanceType);
} else {
DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
- "wrong instance type");
+ Deoptimizer::kWrongInstanceType);
}
} else {
if (tag == 0) {
@@ -2307,7 +2325,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -2347,7 +2365,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is coverted to zero for clamping conversion.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- "not a heap number/undefined");
+ Deoptimizer::kNotAHeapNumberUndefined);
__ Mov(result, 0);
__ B(&done);
@@ -2652,7 +2670,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, "value mismatch");
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}
@@ -2676,9 +2694,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr, "Smi");
+ DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, "not a date object");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2728,21 +2746,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, "division by zero");
+ DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -2770,14 +2788,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, "division by zero");
+ Deoptimize(instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, "minus zero");
+ DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2789,7 +2807,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr, "lost precision");
+ DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
}
}
@@ -2806,13 +2824,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ Sdiv(result, dividend, divisor);
if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DCHECK_EQ(NULL, instr->temp());
+ DCHECK(!instr->temp());
return;
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, "division by zero");
+ DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2824,7 +2842,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
@@ -2836,13 +2854,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr, "lost precision");
+ DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
}
@@ -2851,11 +2869,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, "minus zero");
+ DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->tag_result()) {
__ SmiTag(result.X());
@@ -2887,7 +2905,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ Mov(x2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -2916,7 +2934,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr, "no cache");
+ DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
__ Bind(&done);
}
@@ -2929,17 +2947,18 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
+ Deoptimizer::kUndefined);
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- DeoptimizeIf(eq, instr, "null");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNull);
- DeoptimizeIfSmi(object, instr, "Smi");
+ DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, "not a JavaScript object");
+ DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
@@ -2953,7 +2972,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
+ Deoptimizer::kWrongMap);
__ Bind(&use_cache);
}
@@ -3216,9 +3236,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- x1);
+ instr->arity(), instr);
}
after_push_argument_ = false;
}
@@ -3346,7 +3364,8 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3367,7 +3386,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3387,7 +3407,8 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
}
}
@@ -3523,7 +3544,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr, "negative value");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3615,12 +3636,9 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
Register scratch = ToRegister(instr->temp());
- // Detect the hole NaN by adding one to the integer representation of the
- // result, and checking for overflow.
- STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
- __ Ldr(scratch, mem_op);
- __ Cmn(scratch, 1);
- DeoptimizeIf(vs, instr, "hole");
+ __ Fmov(scratch, result);
+ __ Eor(scratch, scratch, kHoleNanInt64);
+ DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
}
}
@@ -3658,9 +3676,10 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr, "not a Smi");
+ DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
}
}
}
@@ -3764,7 +3783,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -3916,7 +3935,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, "minus zero");
+ DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
}
__ Fcvtms(result, input);
@@ -3926,7 +3945,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}
@@ -3952,13 +3971,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
return;
}
@@ -3981,14 +4000,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, "division by zero");
+ Deoptimize(instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, "minus zero");
+ DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -4031,14 +4050,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr, "division by zero");
+ DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
// Check for (0 / -x) that will produce negative zero.
@@ -4048,7 +4067,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
Label done;
@@ -4207,18 +4226,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr, "overflow");
+ DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr, "minus zero");
+ DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr, "NaN");
+ DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -4296,7 +4315,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ B(&done);
}
@@ -4315,7 +4334,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr, "division by zero");
+ Deoptimize(instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -4329,7 +4348,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr, "minus zero");
+ DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -4344,12 +4363,12 @@ void LCodeGen::DoModI(LModI* instr) {
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, "division by zero");
+ DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr, "minus zero");
+ DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
}
__ Bind(&done);
}
@@ -4372,10 +4391,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr, "minus zero");
+ DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr, "minus zero");
+ DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
}
}
@@ -4385,7 +4404,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Neg(result, left);
}
@@ -4401,7 +4420,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Add(result, left, left);
}
@@ -4420,7 +4439,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr, "overflow");
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
}
if (right >= 0) {
@@ -4430,7 +4449,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4488,13 +4507,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
} else {
__ Mul(result, left, right);
}
@@ -4518,7 +4537,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4526,7 +4545,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@@ -4702,14 +4721,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr, "minus zero");
+ DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- "not a heap number/undefined");
+ Deoptimizer::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4903,7 +4922,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr, "overflow");
+ DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
}
__ SmiTag(output, input);
}
@@ -4915,7 +4934,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr, "not a Smi");
+ DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
}
__ Bind(&untag);
@@ -4940,7 +4959,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, "negative value");
+ DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -4950,7 +4969,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, "negative value");
+ DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@@ -5003,7 +5022,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, "negative value");
+ DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -5013,7 +5032,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, "negative value");
+ DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
}
__ Mov(result, left);
} else {
@@ -5141,7 +5160,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -5179,7 +5199,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
+ Deoptimizer::kHole);
}
// Store the value.
@@ -5361,7 +5382,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5471,7 +5492,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5601,7 +5622,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5615,7 +5636,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5656,7 +5677,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// Output contains zero, undefined is converted to zero for truncating
// conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- "not a heap number/undefined/true/false");
+ Deoptimizer::kNotAHeapNumberUndefinedBoolean);
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5667,13 +5688,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr, "minus zero");
+ DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
}
}
__ Bind(&done);
@@ -5814,7 +5835,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, "memento found");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
__ Bind(&no_memento_found);
}
@@ -5939,7 +5960,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
@@ -5973,10 +5994,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, "Smi");
+ DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr, "not a JavaScript object");
+ Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
index a73bb8caaf..fe16a4e59c 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -213,31 +213,38 @@ class LCodeGen: public LCodeGenBase {
Register temp,
LOperand* index,
String::Encoding encoding);
- void DeoptimizeBranch(LInstruction* instr, const char* detail,
+ void DeoptimizeBranch(LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
BranchType branch_type, Register reg = NoReg,
int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LInstruction* instr, const char* detail,
+ void Deoptimize(LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void DeoptimizeIf(Condition cond, LInstruction* instr, const char* detail);
- void DeoptimizeIfZero(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIf(Condition cond, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
+ void DeoptimizeIfZero(Register rt, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- const char* detail);
+ Deoptimizer::DeoptReason deopt_reason);
void DeoptimizeIfNegative(Register rt, LInstruction* instr,
- const char* detail);
- void DeoptimizeIfSmi(Register rt, LInstruction* instr, const char* detail);
- void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, const char* detail);
+ Deoptimizer::DeoptReason deopt_reason);
+ void DeoptimizeIfSmi(Register rt, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
+ void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail);
+ LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, const char* detail);
+ LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- const char* detail);
+ Deoptimizer::DeoptReason deopt_reason);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- const char* detail);
+ Deoptimizer::DeoptReason deopt_reason);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- const char* detail);
+ Deoptimizer::DeoptReason deopt_reason);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
@@ -318,15 +325,11 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
- // Generate a direct call to a known function.
- // If the function is already loaded into x1 by the caller, function_reg may
- // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
- // automatically load it.
+ // Generate a direct call to a known function. Expects the function
+ // to be in x1.
void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- Register function_reg = NoReg);
+ int formal_parameter_count, int arity,
+ LInstruction* instr);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) OVERRIDE;
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 0253e7cdcc..8b559755a0 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1453,6 +1453,19 @@ void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ Ldr(dst,
+ FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+ : AccessorPair::kSetterOffset;
+ Ldr(dst, FieldMemOperand(dst, offset));
+}
+
+
void MacroAssembler::CheckEnumCache(Register object,
Register null_value,
Register scratch0,
@@ -1748,156 +1761,6 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
}
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- int spill_offset,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
- ASM_LOCATION("CallApiFunctionAndReturn");
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- DCHECK(function_address.is(x1) || function_address.is(x2));
-
- Label profiler_disabled;
- Label end_profiler_check;
- Mov(x10, ExternalReference::is_profiling_address(isolate()));
- Ldrb(w10, MemOperand(x10));
- Cbz(w10, &profiler_disabled);
- Mov(x3, thunk_ref);
- B(&end_profiler_check);
-
- Bind(&profiler_disabled);
- Mov(x3, function_address);
- Bind(&end_profiler_check);
-
- // Save the callee-save registers we are going to use.
- // TODO(all): Is this necessary? ARM doesn't do it.
- STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
- Poke(x19, (spill_offset + 0) * kXRegSize);
- Poke(x20, (spill_offset + 1) * kXRegSize);
- Poke(x21, (spill_offset + 2) * kXRegSize);
- Poke(x22, (spill_offset + 3) * kXRegSize);
-
- // Allocate HandleScope in callee-save registers.
- // We will need to restore the HandleScope after the call to the API function,
- // by allocating it in callee-save registers they will be preserved by C code.
- Register handle_scope_base = x22;
- Register next_address_reg = x19;
- Register limit_reg = x20;
- Register level_reg = w21;
-
- Mov(handle_scope_base, next_address);
- Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
- Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
- Add(level_reg, level_reg, 1);
- Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- Mov(x0, ExternalReference::isolate_address(isolate()));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(this, x3);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- Mov(x0, ExternalReference::isolate_address(isolate()));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load value from ReturnValue.
- Ldr(x0, return_value_operand);
- Bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- if (emit_debug_code()) {
- Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
- Cmp(w1, level_reg);
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
- }
- Sub(level_reg, level_reg, 1);
- Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
- Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
- Cmp(limit_reg, x1);
- B(ne, &delete_allocated_handles);
-
- Bind(&leave_exit_frame);
- // Restore callee-saved registers.
- Peek(x19, (spill_offset + 0) * kXRegSize);
- Peek(x20, (spill_offset + 1) * kXRegSize);
- Peek(x21, (spill_offset + 2) * kXRegSize);
- Peek(x22, (spill_offset + 3) * kXRegSize);
-
- // Check if the function scheduled an exception.
- Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
- Ldr(x5, MemOperand(x5));
- JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
- Bind(&exception_handled);
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- Ldr(cp, *context_restore_operand);
- }
-
- LeaveExitFrame(false, x1, !restore_context);
- Drop(stack_space);
- Ret();
-
- Bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallExternalReference(
- ExternalReference(
- Runtime::kPromoteScheduledException, isolate()), 0);
- }
- B(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- Bind(&delete_allocated_handles);
- Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
- // Save the return value in a callee-save register.
- Register saved_result = x19;
- Mov(saved_result, x0);
- Mov(x0, ExternalReference::isolate_address(isolate()));
- CallCFunction(
- ExternalReference::delete_handle_scope_extensions(isolate()), 1);
- Mov(x0, saved_result);
- B(&leave_exit_frame);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
Mov(x0, num_arguments);
@@ -3812,10 +3675,15 @@ void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
}
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Mov(value, Operand(cell));
Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
JumpIfSmi(value, miss);
}
@@ -4073,7 +3941,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(
Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
Check(lt, kIndexIsTooLarge);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
Cmp(index, 0);
Check(ge, kIndexIsNegative);
}
@@ -4232,7 +4100,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
// Get the value at the masked, scaled index and return.
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index ee4358955f..3a0df0b049 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -881,6 +881,8 @@ class MacroAssembler : public Assembler {
void EnumLengthUntagged(Register dst, Register map);
void EnumLengthSmi(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register dst, Register src) {
@@ -1130,24 +1132,6 @@ class MacroAssembler : public Assembler {
int num_reg_arguments,
int num_double_arguments);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions.
- // 'stack_space' is the space to be unwound on exit (includes the call JS
- // arguments space and the additional space allocated for the fast call).
- // 'spill_offset' is the offset from the stack pointer where
- // CallApiFunctionAndReturn can spill registers.
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- int spill_offset,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand);
-
- // The number of register that CallApiFunctionAndReturn will need to save on
- // the stack. The space for these registers need to be allocated in the
- // ExitFrame before calling CallApiFunctionAndReturn.
- static const int kCallApiFunctionSpillSpace = 4;
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
// Tail call of a runtime routine (jump).
@@ -1488,6 +1472,8 @@ class MacroAssembler : public Assembler {
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
index e9a485d090..1630f212bf 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -111,12 +111,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
+ Zone* zone, Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
index 632c513643..da6b26b925 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -17,7 +17,8 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerARM64(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerARM64(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerARM64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index bc524af72c..819a89765d 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -12,6 +12,7 @@
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/assembler.h"
+#include "src/codegen.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
@@ -3100,7 +3101,7 @@ T Simulator::FPSqrt(T op) {
} else if (op < 0.0) {
return FPDefaultNaN<T>();
} else {
- return std::sqrt(op);
+ return fast_sqrt(op);
}
}
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index c702d8caf3..784f51f0e4 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -361,7 +361,7 @@ function ArrayToString() {
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
- return %_CallFunction(array, NoSideEffectsObjectToString);
+ return %_CallFunction(array, DefaultObjectToString);
}
return %_CallFunction(array, func);
}
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index c73b470c28..f4299ed717 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -65,6 +65,8 @@
#include "src/arm64/assembler-arm64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@@ -85,6 +87,8 @@
#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@@ -107,7 +111,6 @@ double min_int;
double one_half;
double minus_one_half;
double negative_infinity;
-double canonical_non_hole_nan;
double the_hole_nan;
double uint32_bias;
};
@@ -253,6 +256,7 @@ int Label::pos() const {
// position: 01
// statement_position: 10
// comment: 11 (not used in short_data_record)
+// deopt_reason: 11 (not used in long_data_record)
//
// Long record format:
// 4-bit middle_tag:
@@ -325,6 +329,10 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
+// Reuse the same value for deopt reason tag in short record format.
+// It is possible because we use kCommentTag only for the long record format.
+const int kDeoptReasonTag = 3;
+
const int kPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 0;
const int kVeneerPoolTag = 1;
@@ -407,7 +415,38 @@ void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
}
+void RelocInfoWriter::WritePosition(int pc_delta, int pos_delta,
+ RelocInfo::Mode rmode) {
+ int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
+ : kStatementPositionTag;
+ // Check if delta is small enough to fit in a tagged byte.
+ if (is_intn(pos_delta, kSmallDataBits)) {
+ WriteTaggedPC(pc_delta, kLocatableTag);
+ WriteTaggedData(pos_delta, pos_type_tag);
+ } else {
+ // Otherwise, use costly encoding.
+ WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+ WriteExtraTaggedIntData(pos_delta, pos_type_tag);
+ }
+}
+
+
+void RelocInfoWriter::FlushPosition() {
+ if (!next_position_candidate_flushed_) {
+ WritePosition(next_position_candidate_pc_delta_,
+ next_position_candidate_pos_delta_, RelocInfo::POSITION);
+ next_position_candidate_pos_delta_ = 0;
+ next_position_candidate_pc_delta_ = 0;
+ next_position_candidate_flushed_ = true;
+ }
+}
+
+
void RelocInfoWriter::Write(const RelocInfo* rinfo) {
+ RelocInfo::Mode rmode = rinfo->rmode();
+ if (rmode != RelocInfo::POSITION) {
+ FlushPosition();
+ }
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
@@ -417,7 +456,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
<= kMaxStandardNonCompactModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
- RelocInfo::Mode rmode = rinfo->rmode();
// The two most common modes are given small tags, and usually fit in a byte.
if (rmode == RelocInfo::EMBEDDED_OBJECT) {
@@ -439,20 +477,26 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
}
last_id_ = static_cast<int>(rinfo->data());
+ } else if (rmode == RelocInfo::DEOPT_REASON) {
+ DCHECK(rinfo->data() < (1 << kSmallDataBits));
+ WriteTaggedPC(pc_delta, kLocatableTag);
+ WriteTaggedData(rinfo->data(), kDeoptReasonTag);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for position.
DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
- int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
- : kStatementPositionTag;
- // Check if delta is small enough to fit in a tagged byte.
- if (is_intn(pos_delta, kSmallDataBits)) {
- WriteTaggedPC(pc_delta, kLocatableTag);
- WriteTaggedData(pos_delta, pos_type_tag);
+ if (rmode == RelocInfo::STATEMENT_POSITION) {
+ WritePosition(pc_delta, pos_delta, rmode);
} else {
- // Otherwise, use costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedIntData(pos_delta, pos_type_tag);
+ DCHECK(rmode == RelocInfo::POSITION);
+ if (pc_delta != 0 || last_mode_ != RelocInfo::POSITION) {
+ FlushPosition();
+ next_position_candidate_pc_delta_ = pc_delta;
+ next_position_candidate_pos_delta_ = pos_delta;
+ } else {
+ next_position_candidate_pos_delta_ += pos_delta;
+ }
+ next_position_candidate_flushed_ = false;
}
last_position_ = static_cast<int>(rinfo->data());
} else if (RelocInfo::IsComment(rmode)) {
@@ -470,10 +514,11 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
- DCHECK(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
+ DCHECK(saved_mode < kPoolExtraTag);
WriteExtraTaggedPC(pc_delta, saved_mode);
}
last_pc_ = rinfo->pc();
+ last_mode_ = rmode;
#ifdef DEBUG
DCHECK(begin_pos - pos_ <= kMaxSize);
#endif
@@ -580,6 +625,12 @@ inline void RelocIterator::ReadTaggedPosition() {
}
+inline void RelocIterator::ReadTaggedData() {
+ uint8_t unsigned_b = *pos_;
+ rinfo_.data_ = unsigned_b >> kTagBits;
+}
+
+
static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
DCHECK(tag == kNonstatementPositionTag ||
tag == kStatementPositionTag);
@@ -613,9 +664,10 @@ void RelocIterator::next() {
ReadTaggedId();
return;
}
+ } else if (locatable_tag == kDeoptReasonTag) {
+ ReadTaggedData();
+ if (SetMode(RelocInfo::DEOPT_REASON)) return;
} else {
- // Compact encoding is never used for comments,
- // so it must be a position.
DCHECK(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
@@ -780,6 +832,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
+ case RelocInfo::DEOPT_REASON:
+ return "deopt reason";
case RelocInfo::CONST_POOL:
return "constant pool";
case RelocInfo::VENEER_POOL:
@@ -800,6 +854,9 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << static_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (IsComment(rmode_)) {
os << " (" << reinterpret_cast<char*>(data_) << ")";
+ } else if (rmode_ == DEOPT_REASON) {
+ os << " (" << Deoptimizer::GetDeoptReason(
+ static_cast<Deoptimizer::DeoptReason>(data_)) << ")";
} else if (rmode_ == EMBEDDED_OBJECT) {
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
@@ -860,6 +917,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
+ case DEOPT_REASON:
case CONST_POOL:
case VENEER_POOL:
case DEBUG_BREAK_SLOT:
@@ -884,7 +942,6 @@ void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
double_constants.minus_one_half = -0.5;
- double_constants.canonical_non_hole_nan = base::OS::nan_value();
double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias =
@@ -1242,12 +1299,6 @@ ExternalReference ExternalReference::address_of_negative_infinity() {
}
-ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
-}
-
-
ExternalReference ExternalReference::address_of_the_hole_nan() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.the_hole_nan));
@@ -1297,6 +1348,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#elif V8_TARGET_ARCH_PPC
+ function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS64
@@ -1443,15 +1496,18 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
-#if defined(__MINGW64_VERSION_MAJOR) && \
- (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
- // MinGW64 has a custom implementation for pow. This handles certain
+#if (defined(__MINGW64_VERSION_MAJOR) && \
+ (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
+ defined(V8_OS_AIX)
+ // MinGW64 and AIX have a custom implementation for pow. This handles certain
// special cases that are different.
- if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
+ if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
double f;
- if (std::modf(y, &f) != 0.0) {
- return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
- }
+ double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+ /* retain sign if odd integer exponent */
+ return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
+ ? copysign(result, x)
+ : result;
}
if (x == 2.0) {
@@ -1465,7 +1521,7 @@ double power_double_double(double x, double y) {
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
- return base::OS::nan_value();
+ return std::numeric_limits<double>::quiet_NaN();
}
return std::pow(x, y);
}
@@ -1596,4 +1652,37 @@ bool PositionsRecorder::WriteRecordedPositions() {
return written;
}
+
+// Platform specific but identical code for all the platforms.
+
+
+void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::POSITION, raw_position);
+ RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
+ }
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index e95b7edf75..eb00f8a7e6 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -365,7 +365,7 @@ class RelocInfo {
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- DEBUG_BREAK, // Code target for the debugger statement.
+ DEBUG_BREAK, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
@@ -373,9 +373,9 @@ class RelocInfo {
RUNTIME_ENTRY,
JS_RETURN, // Marks start of the ExitJSFrame code.
COMMENT,
- POSITION, // See comment for kNoPosition above.
+ POSITION, // See comment for kNoPosition above.
STATEMENT_POSITION, // See comment for kNoPosition above.
- DEBUG_BREAK_SLOT, // Additional code inserted for debug break slot.
+ DEBUG_BREAK_SLOT, // Additional code inserted for debug break slot.
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@@ -384,13 +384,20 @@ class RelocInfo {
CONST_POOL,
VENEER_POOL,
+ DEOPT_REASON, // Deoptimization reason index.
+
// add more as needed
// Pseudo-types
- NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
- NONE32, // never recorded 32-bit value
- NONE64, // never recorded 64-bit value
+ NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
+ NONE32, // never recorded 32-bit value
+ NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by
// code aging.
+
+ // Encoded internal reference, used only on MIPS and MIPS64.
+ // Re-uses previous ARM-only encoding, to fit in RealRelocMode space.
+ INTERNAL_REFERENCE_ENCODED = CONST_POOL,
+
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
@@ -448,6 +455,9 @@ class RelocInfo {
static inline bool IsVeneerPool(Mode mode) {
return mode == VENEER_POOL;
}
+ static inline bool IsDeoptReason(Mode mode) {
+ return mode == DEOPT_REASON;
+ }
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@@ -460,6 +470,9 @@ class RelocInfo {
static inline bool IsInternalReference(Mode mode) {
return mode == INTERNAL_REFERENCE;
}
+ static inline bool IsInternalReferenceEncoded(Mode mode) {
+ return mode == INTERNAL_REFERENCE_ENCODED;
+ }
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
@@ -642,14 +655,24 @@ class RelocInfo {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
- RelocInfoWriter() : pos_(NULL),
- last_pc_(NULL),
- last_id_(0),
- last_position_(0) {}
- RelocInfoWriter(byte* pos, byte* pc) : pos_(pos),
- last_pc_(pc),
- last_id_(0),
- last_position_(0) {}
+ RelocInfoWriter()
+ : pos_(NULL),
+ last_pc_(NULL),
+ last_id_(0),
+ last_position_(0),
+ last_mode_(RelocInfo::NUMBER_OF_MODES),
+ next_position_candidate_pos_delta_(0),
+ next_position_candidate_pc_delta_(0),
+ next_position_candidate_flushed_(true) {}
+ RelocInfoWriter(byte* pos, byte* pc)
+ : pos_(pos),
+ last_pc_(pc),
+ last_id_(0),
+ last_position_(0),
+ last_mode_(RelocInfo::NUMBER_OF_MODES),
+ next_position_candidate_pos_delta_(0),
+ next_position_candidate_pc_delta_(0),
+ next_position_candidate_flushed_(true) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@@ -663,6 +686,8 @@ class RelocInfoWriter BASE_EMBEDDED {
last_pc_ = pc;
}
+ void Finish() { FlushPosition(); }
+
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
// On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
@@ -679,11 +704,19 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
+ inline void WritePosition(int pc_delta, int pos_delta, RelocInfo::Mode rmode);
+
+ void FlushPosition();
byte* pos_;
byte* last_pc_;
int last_id_;
int last_position_;
+ RelocInfo::Mode last_mode_;
+ int next_position_candidate_pos_delta_;
+ uint32_t next_position_candidate_pc_delta_;
+ bool next_position_candidate_flushed_;
+
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};
@@ -733,6 +766,7 @@ class RelocIterator: public Malloced {
int GetLocatableTypeTag();
void ReadTaggedId();
void ReadTaggedPosition();
+ void ReadTaggedData();
// If the given mode is wanted, set it in rinfo_ and return true.
// Else return false. Used for efficiently skipping unwanted modes.
@@ -925,7 +959,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_one_half();
static ExternalReference address_of_minus_one_half();
static ExternalReference address_of_negative_infinity();
- static ExternalReference address_of_canonical_non_hole_nan();
static ExternalReference address_of_the_hole_nan();
static ExternalReference address_of_uint32_bias();
diff --git a/deps/v8/src/ast-numbering.cc b/deps/v8/src/ast-numbering.cc
index c2dd70e1bb..ea25ad8bbc 100644
--- a/deps/v8/src/ast-numbering.cc
+++ b/deps/v8/src/ast-numbering.cc
@@ -15,12 +15,11 @@ namespace internal {
class AstNumberingVisitor FINAL : public AstVisitor {
public:
- explicit AstNumberingVisitor(Zone* zone)
+ explicit AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
next_id_(BailoutId::FirstUsable().ToInt()),
- dont_crankshaft_reason_(kNoReason),
- dont_turbofan_reason_(kNoReason) {
- InitializeAstVisitor(zone);
+ dont_optimize_reason_(kNoReason) {
+ InitializeAstVisitor(isolate, zone);
}
bool Renumber(FunctionLiteral* node);
@@ -45,23 +44,15 @@ class AstNumberingVisitor FINAL : public AstVisitor {
}
void IncrementNodeCount() { properties_.add_node_count(1); }
- void DisableCrankshaft(BailoutReason reason) {
- dont_crankshaft_reason_ = reason;
+ void DisableSelfOptimization() {
properties_.flags()->Add(kDontSelfOptimize);
}
- // TODO(turbofan): Remove the dont_turbofan_reason once no nodes are
- // DontTurbofanNode. That set of nodes must be kept in sync with
- // Pipeline::GenerateCode.
- void DisableTurbofan(BailoutReason reason) {
- dont_crankshaft_reason_ = reason;
- dont_turbofan_reason_ = reason;
+ void DisableOptimization(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
- void DisableSelfOptimization() {
- properties_.flags()->Add(kDontSelfOptimize);
- }
void DisableCaching(BailoutReason reason) {
- dont_crankshaft_reason_ = reason;
+ dont_optimize_reason_ = reason;
DisableSelfOptimization();
properties_.flags()->Add(kDontCache);
}
@@ -86,15 +77,11 @@ class AstNumberingVisitor FINAL : public AstVisitor {
}
}
- BailoutReason dont_optimize_reason() const {
- return (dont_turbofan_reason_ != kNoReason) ? dont_turbofan_reason_
- : dont_crankshaft_reason_;
- }
+ BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
int next_id_;
AstProperties properties_;
- BailoutReason dont_crankshaft_reason_;
- BailoutReason dont_turbofan_reason_;
+ BailoutReason dont_optimize_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -109,14 +96,14 @@ void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
void AstNumberingVisitor::VisitExportDeclaration(ExportDeclaration* node) {
IncrementNodeCount();
- DisableCrankshaft(kExportDeclaration);
+ DisableOptimization(kExportDeclaration);
VisitVariableProxy(node->proxy());
}
void AstNumberingVisitor::VisitModuleUrl(ModuleUrl* node) {
IncrementNodeCount();
- DisableCrankshaft(kModuleUrl);
+ DisableOptimization(kModuleUrl);
}
@@ -137,7 +124,7 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
IncrementNodeCount();
- DisableCrankshaft(kDebuggerStatement);
+ DisableOptimization(kDebuggerStatement);
node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
}
@@ -145,7 +132,7 @@ void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
IncrementNodeCount();
- DisableCrankshaft(kNativeFunctionLiteral);
+ DisableOptimization(kNativeFunctionLiteral);
node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
}
@@ -165,7 +152,7 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
IncrementNodeCount();
if (node->var()->IsLookupSlot()) {
- DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+ DisableOptimization(kReferenceToAVariableWhichRequiresDynamicLookup);
}
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
@@ -180,7 +167,7 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
IncrementNodeCount();
- DisableTurbofan(kSuperReference);
+ DisableOptimization(kSuperReference);
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(SuperReference::num_ids()));
Visit(node->this_var());
@@ -189,7 +176,7 @@ void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
IncrementNodeCount();
- DisableCrankshaft(kModuleDeclaration);
+ DisableOptimization(kModuleDeclaration);
VisitVariableProxy(node->proxy());
Visit(node->module());
}
@@ -197,29 +184,22 @@ void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
IncrementNodeCount();
- DisableCrankshaft(kImportDeclaration);
+ DisableOptimization(kImportDeclaration);
VisitVariableProxy(node->proxy());
Visit(node->module());
}
-void AstNumberingVisitor::VisitModuleVariable(ModuleVariable* node) {
- IncrementNodeCount();
- DisableCrankshaft(kModuleVariable);
- Visit(node->proxy());
-}
-
-
void AstNumberingVisitor::VisitModulePath(ModulePath* node) {
IncrementNodeCount();
- DisableCrankshaft(kModulePath);
+ DisableOptimization(kModulePath);
Visit(node->module());
}
void AstNumberingVisitor::VisitModuleStatement(ModuleStatement* node) {
IncrementNodeCount();
- DisableCrankshaft(kModuleStatement);
+ DisableOptimization(kModuleStatement);
Visit(node->body());
}
@@ -238,7 +218,7 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
void AstNumberingVisitor::VisitYield(Yield* node) {
IncrementNodeCount();
- DisableCrankshaft(kYield);
+ DisableOptimization(kYield);
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Yield::num_ids()));
Visit(node->generator_object());
@@ -294,7 +274,7 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
ReserveFeedbackSlots(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them.
- DisableCrankshaft(kCallToAJavaScriptRuntimeFunction);
+ DisableOptimization(kCallToAJavaScriptRuntimeFunction);
}
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
@@ -303,7 +283,8 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
- DisableCrankshaft(kWithStatement);
+ DisableOptimization(kWithStatement);
+ node->set_base_id(ReserveIdRange(WithStatement::num_ids()));
Visit(node->expression());
Visit(node->statement());
}
@@ -329,7 +310,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
IncrementNodeCount();
- DisableTurbofan(kTryCatchStatement);
+ DisableOptimization(kTryCatchStatement);
Visit(node->try_block());
Visit(node->catch_block());
}
@@ -337,7 +318,7 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
- DisableTurbofan(kTryFinallyStatement);
+ DisableOptimization(kTryFinallyStatement);
Visit(node->try_block());
Visit(node->finally_block());
}
@@ -390,7 +371,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
- DisableTurbofan(kForOfStatement);
+ DisableOptimization(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
Visit(node->assign_iterator());
Visit(node->next_result());
@@ -452,8 +433,8 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
- DisableTurbofan(kClassLiteral);
- node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
+ DisableOptimization(kClassLiteral);
+ node->set_base_id(ReserveIdRange(node->num_ids()));
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->class_variable_proxy()) {
@@ -467,7 +448,7 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(ObjectLiteral::num_ids()));
+ node->set_base_id(ReserveIdRange(node->num_ids()));
for (int i = 0; i < node->properties()->length(); i++) {
VisitObjectLiteralProperty(node->properties()->at(i));
}
@@ -476,6 +457,7 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
void AstNumberingVisitor::VisitObjectLiteralProperty(
ObjectLiteralProperty* node) {
+ if (node->is_computed_name()) DisableOptimization(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
@@ -551,12 +533,12 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
if (scope->HasIllegalRedeclaration()) {
scope->VisitIllegalRedeclaration(this);
- DisableCrankshaft(kFunctionWithIllegalRedeclaration);
+ DisableOptimization(kFunctionWithIllegalRedeclaration);
return Finish(node);
}
- if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
+ if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
- DisableCrankshaft(kContextAllocatedArguments);
+ DisableOptimization(kContextAllocatedArguments);
}
VisitDeclarations(scope->declarations());
@@ -570,8 +552,9 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
}
-bool AstNumbering::Renumber(FunctionLiteral* function, Zone* zone) {
- AstNumberingVisitor visitor(zone);
+bool AstNumbering::Renumber(Isolate* isolate, Zone* zone,
+ FunctionLiteral* function) {
+ AstNumberingVisitor visitor(isolate, zone);
return visitor.Renumber(function);
}
}
diff --git a/deps/v8/src/ast-numbering.h b/deps/v8/src/ast-numbering.h
index ab97c22bdf..c068c2f286 100644
--- a/deps/v8/src/ast-numbering.h
+++ b/deps/v8/src/ast-numbering.h
@@ -11,7 +11,7 @@ namespace internal {
namespace AstNumbering {
// Assign type feedback IDs and bailout IDs to an AST node tree.
//
-bool Renumber(FunctionLiteral* function, Zone* zone);
+bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
}
}
} // namespace v8::internal
diff --git a/deps/v8/src/ast-this-access-visitor.cc b/deps/v8/src/ast-this-access-visitor.cc
deleted file mode 100644
index cf4a3de842..0000000000
--- a/deps/v8/src/ast-this-access-visitor.cc
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast-this-access-visitor.h"
-#include "src/parser.h"
-
-namespace v8 {
-namespace internal {
-
-typedef class AstThisAccessVisitor ATAV; // for code shortitude.
-
-ATAV::AstThisAccessVisitor(Zone* zone) : uses_this_(false) {
- InitializeAstVisitor(zone);
-}
-
-
-void ATAV::VisitVariableProxy(VariableProxy* proxy) {
- if (proxy->is_this()) {
- uses_this_ = true;
- }
-}
-
-
-void ATAV::VisitSuperReference(SuperReference* leaf) {
- // disallow super.method() and super(...).
- uses_this_ = true;
-}
-
-
-void ATAV::VisitCallNew(CallNew* e) {
- // new super(..) does not use 'this'.
- if (!e->expression()->IsSuperReference()) {
- Visit(e->expression());
- }
- VisitExpressions(e->arguments());
-}
-
-
-// ---------------------------------------------------------------------------
-// -- Leaf nodes -------------------------------------------------------------
-// ---------------------------------------------------------------------------
-
-void ATAV::VisitVariableDeclaration(VariableDeclaration* leaf) {}
-void ATAV::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
-void ATAV::VisitModuleDeclaration(ModuleDeclaration* leaf) {}
-void ATAV::VisitImportDeclaration(ImportDeclaration* leaf) {}
-void ATAV::VisitExportDeclaration(ExportDeclaration* leaf) {}
-void ATAV::VisitModuleVariable(ModuleVariable* leaf) {}
-void ATAV::VisitModulePath(ModulePath* leaf) {}
-void ATAV::VisitModuleUrl(ModuleUrl* leaf) {}
-void ATAV::VisitEmptyStatement(EmptyStatement* leaf) {}
-void ATAV::VisitContinueStatement(ContinueStatement* leaf) {}
-void ATAV::VisitBreakStatement(BreakStatement* leaf) {}
-void ATAV::VisitDebuggerStatement(DebuggerStatement* leaf) {}
-void ATAV::VisitFunctionLiteral(FunctionLiteral* leaf) {}
-void ATAV::VisitNativeFunctionLiteral(NativeFunctionLiteral* leaf) {}
-void ATAV::VisitLiteral(Literal* leaf) {}
-void ATAV::VisitRegExpLiteral(RegExpLiteral* leaf) {}
-void ATAV::VisitThisFunction(ThisFunction* leaf) {}
-
-// ---------------------------------------------------------------------------
-// -- Pass-through nodes------------------------------------------------------
-// ---------------------------------------------------------------------------
-void ATAV::VisitModuleLiteral(ModuleLiteral* e) { Visit(e->body()); }
-
-
-void ATAV::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
-
-
-void ATAV::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void ATAV::VisitIfStatement(IfStatement* stmt) {
- Visit(stmt->condition());
- Visit(stmt->then_statement());
- Visit(stmt->else_statement());
-}
-
-
-void ATAV::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void ATAV::VisitWithStatement(WithStatement* stmt) {
- Visit(stmt->expression());
- Visit(stmt->statement());
-}
-
-
-void ATAV::VisitSwitchStatement(SwitchStatement* stmt) {
- Visit(stmt->tag());
- ZoneList<CaseClause*>* clauses = stmt->cases();
- for (int i = 0; i < clauses->length(); i++) {
- Visit(clauses->at(i));
- }
-}
-
-
-void ATAV::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- Visit(stmt->finally_block());
-}
-
-
-void ATAV::VisitClassLiteral(ClassLiteral* e) {
- VisitIfNotNull(e->extends());
- Visit(e->constructor());
- ZoneList<ObjectLiteralProperty*>* properties = e->properties();
- for (int i = 0; i < properties->length(); i++) {
- Visit(properties->at(i)->value());
- }
-}
-
-
-void ATAV::VisitConditional(Conditional* e) {
- Visit(e->condition());
- Visit(e->then_expression());
- Visit(e->else_expression());
-}
-
-
-void ATAV::VisitObjectLiteral(ObjectLiteral* e) {
- ZoneList<ObjectLiteralProperty*>* properties = e->properties();
- for (int i = 0; i < properties->length(); i++) {
- Visit(properties->at(i)->value());
- }
-}
-
-
-void ATAV::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
-
-
-void ATAV::VisitYield(Yield* stmt) {
- Visit(stmt->generator_object());
- Visit(stmt->expression());
-}
-
-
-void ATAV::VisitThrow(Throw* stmt) { Visit(stmt->exception()); }
-
-
-void ATAV::VisitProperty(Property* e) {
- Visit(e->obj());
- Visit(e->key());
-}
-
-
-void ATAV::VisitCall(Call* e) {
- Visit(e->expression());
- VisitExpressions(e->arguments());
-}
-
-
-void ATAV::VisitCallRuntime(CallRuntime* e) {
- VisitExpressions(e->arguments());
-}
-
-
-void ATAV::VisitUnaryOperation(UnaryOperation* e) { Visit(e->expression()); }
-
-
-void ATAV::VisitBinaryOperation(BinaryOperation* e) {
- Visit(e->left());
- Visit(e->right());
-}
-
-
-void ATAV::VisitCompareOperation(CompareOperation* e) {
- Visit(e->left());
- Visit(e->right());
-}
-
-
-void ATAV::VisitCaseClause(CaseClause* cc) {
- if (!cc->is_default()) Visit(cc->label());
- VisitStatements(cc->statements());
-}
-
-
-void ATAV::VisitModuleStatement(ModuleStatement* stmt) { Visit(stmt->body()); }
-
-
-void ATAV::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- Visit(stmt->catch_block());
-}
-
-
-void ATAV::VisitDoWhileStatement(DoWhileStatement* loop) {
- Visit(loop->body());
- Visit(loop->cond());
-}
-
-
-void ATAV::VisitWhileStatement(WhileStatement* loop) {
- Visit(loop->cond());
- Visit(loop->body());
-}
-
-
-void ATAV::VisitForStatement(ForStatement* loop) {
- VisitIfNotNull(loop->init());
- VisitIfNotNull(loop->cond());
- Visit(loop->body());
- VisitIfNotNull(loop->next());
-}
-
-
-void ATAV::VisitForInStatement(ForInStatement* loop) {
- Visit(loop->each());
- Visit(loop->subject());
- Visit(loop->body());
-}
-
-
-void ATAV::VisitForOfStatement(ForOfStatement* loop) {
- Visit(loop->each());
- Visit(loop->subject());
- Visit(loop->body());
-}
-
-
-void ATAV::VisitAssignment(Assignment* stmt) {
- Expression* l = stmt->target();
- Visit(l);
- Visit(stmt->value());
-}
-
-
-void ATAV::VisitCountOperation(CountOperation* e) {
- Expression* l = e->expression();
- Visit(l);
-}
-}
-} // namespace v8::internal
diff --git a/deps/v8/src/ast-this-access-visitor.h b/deps/v8/src/ast-this-access-visitor.h
deleted file mode 100644
index 60309815ed..0000000000
--- a/deps/v8/src/ast-this-access-visitor.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_THIS_ACCESS_VISITOR_H_
-#define V8_AST_THIS_ACCESS_VISITOR_H_
-#include "src/ast.h"
-
-namespace v8 {
-namespace internal {
-
-class AstThisAccessVisitor : public AstVisitor {
- public:
- explicit AstThisAccessVisitor(Zone* zone);
-
- bool UsesThis() { return uses_this_; }
-
-#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- bool uses_this_;
-
- void VisitIfNotNull(AstNode* node) {
- if (node != NULL) Visit(node);
- }
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- DISALLOW_COPY_AND_ASSIGN(AstThisAccessVisitor);
-};
-}
-} // namespace v8::internal
-#endif // V8_AST_THIS_ACCESS_VISITOR_H_
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast-value-factory.h
index a189f173b5..fe333254bb 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast-value-factory.h
@@ -88,8 +88,6 @@ class AstRawString : public AstString {
return *c;
}
- V8_INLINE bool IsArguments(AstValueFactory* ast_value_factory) const;
-
// For storing AstRawStrings in a hash map.
uint32_t hash() const {
return hash_;
@@ -249,17 +247,19 @@ class AstValue : public ZoneObject {
F(get_template_callsite, "GetTemplateCallSite") \
F(initialize_const_global, "initializeConstGlobal") \
F(initialize_var_global, "initializeVarGlobal") \
+ F(is_construct_call, "_IsConstructCall") \
F(let, "let") \
F(make_reference_error, "MakeReferenceErrorEmbedded") \
F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
F(make_type_error, "MakeTypeErrorEmbedded") \
- F(module, "module") \
F(native, "native") \
+ F(new_target, "new.target") \
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(this, "this") \
F(use_asm, "use asm") \
+ F(use_strong, "use strong") \
F(use_strict, "use strict") \
F(value, "value")
@@ -355,11 +355,6 @@ class AstValueFactory {
OTHER_CONSTANTS(F)
#undef F
};
-
-
-bool AstRawString::IsArguments(AstValueFactory* ast_value_factory) const {
- return ast_value_factory->arguments_string() == this;
-}
} } // namespace v8::internal
#undef STRING_CONSTANTS
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 6329371faa..ac81e751af 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -65,30 +65,22 @@ VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
- raw_name_(var->raw_name()),
- interface_(var->interface()) {
+ raw_name_(var->raw_name()) {
BindTo(var);
}
VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
- Interface* interface, int position)
+ int position)
: Expression(zone, position),
bit_field_(IsThisField::encode(is_this) | IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
- raw_name_(name),
- interface_(interface) {}
+ raw_name_(name) {}
void VariableProxy::BindTo(Variable* var) {
- DCHECK(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
DCHECK((is_this() && var->is_this()) || raw_name() == var->raw_name());
- // Ideally CONST-ness should match. However, this is very hard to achieve
- // because we don't know the exact semantics of conflicting (const and
- // non-const) multiple variable declarations, const vars introduced via
- // eval() etc. Const-ness and variable declarations are a complete mess
- // in JS. Sigh...
set_var(var);
set_is_resolved();
var->set_is_used();
@@ -146,8 +138,8 @@ int FunctionLiteral::end_position() const {
}
-StrictMode FunctionLiteral::strict_mode() const {
- return scope()->strict_mode();
+LanguageMode FunctionLiteral::language_mode() const {
+ return scope()->language_mode();
}
@@ -157,13 +149,6 @@ bool FunctionLiteral::uses_super_property() const {
}
-bool FunctionLiteral::uses_super_constructor_call() const {
- DCHECK_NOT_NULL(scope());
- return scope()->uses_super_constructor_call() ||
- scope()->inner_uses_super_constructor_call();
-}
-
-
// Helper to find an existing shared function info in the baseline code for the
// given function literal. Used to canonicalize SharedFunctionInfo objects.
void FunctionLiteral::InitializeSharedInfo(
@@ -183,15 +168,29 @@ void FunctionLiteral::InitializeSharedInfo(
}
-ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone,
- AstValueFactory* ast_value_factory,
- Literal* key, Expression* value,
- bool is_static) {
- emit_store_ = true;
- key_ = key;
- value_ = value;
- is_static_ = is_static;
- if (key->raw_value()->EqualsString(ast_value_factory->proto_string())) {
+ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
+ Kind kind, bool is_static,
+ bool is_computed_name)
+ : key_(key),
+ value_(value),
+ kind_(kind),
+ emit_store_(true),
+ is_static_(is_static),
+ is_computed_name_(is_computed_name) {}
+
+
+ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
+ Expression* key, Expression* value,
+ bool is_static,
+ bool is_computed_name)
+ : key_(key),
+ value_(value),
+ emit_store_(true),
+ is_static_(is_static),
+ is_computed_name_(is_computed_name) {
+ if (!is_computed_name &&
+ key->AsLiteral()->raw_value()->EqualsString(
+ ast_value_factory->proto_string())) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@@ -203,16 +202,6 @@ ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone,
}
-ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone, bool is_getter,
- FunctionLiteral* value,
- bool is_static) {
- emit_store_ = true;
- value_ = value;
- kind_ = is_getter ? GETTER : SETTER;
- is_static_ = is_static;
-}
-
-
bool ObjectLiteral::Property::IsCompileTimeValue() {
return kind_ == CONSTANT ||
(kind_ == MATERIALIZED_LITERAL &&
@@ -231,25 +220,33 @@ bool ObjectLiteral::Property::emit_store() {
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
+ const auto GETTER = ObjectLiteral::Property::GETTER;
+ const auto SETTER = ObjectLiteral::Property::SETTER;
+
ZoneAllocationPolicy allocator(zone);
ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
allocator);
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
- Literal* literal = property->key();
- if (literal->value()->IsNull()) continue;
+ if (property->is_computed_name()) continue;
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) continue;
+ Literal* literal = property->key()->AsLiteral();
+ DCHECK(!literal->value()->IsNull());
+
+ // If there is an existing entry do not emit a store unless the previous
+ // entry was also an accessor.
uint32_t hash = literal->Hash();
- // If the key of a computed property is in the table, do not emit
- // a store for the property later.
- if ((property->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL ||
- property->kind() == ObjectLiteral::Property::COMPUTED) &&
- table.Lookup(literal, hash, false, allocator) != NULL) {
- property->set_emit_store(false);
- } else {
- // Add key to the table.
- table.Lookup(literal, hash, true, allocator);
+ ZoneHashMap::Entry* entry = table.Lookup(literal, hash, true, allocator);
+ if (entry->value != NULL) {
+ auto previous_kind =
+ static_cast<ObjectLiteral::Property*>(entry->value)->kind();
+ if (!((property->kind() == GETTER && previous_kind == SETTER) ||
+ (property->kind() == SETTER && previous_kind == GETTER))) {
+ property->set_emit_store(false);
+ }
}
+ entry->value = property;
}
}
@@ -279,6 +276,13 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
is_simple = false;
continue;
}
+
+ if (position == boilerplate_properties_ * 2) {
+ DCHECK(property->is_computed_name());
+ break;
+ }
+ DCHECK(!property->is_computed_name());
+
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->BuildConstants(isolate);
@@ -288,7 +292,7 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
// Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
// value for COMPUTED properties, the real value is filled in at
// runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->value();
+ Handle<Object> key = property->key()->AsLiteral()->value();
Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
// Ensure objects that may, at any point in time, contain fields with double
@@ -417,16 +421,6 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
}
-void TargetCollector::AddTarget(Label* target, Zone* zone) {
- // Add the label to the collector, but discard duplicates.
- int length = targets_.length();
- for (int i = 0; i < length; i++) {
- if (targets_[i] == target) return;
- }
- targets_.Add(target, zone);
-}
-
-
void UnaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
// TODO(olivf) If this Operation is used in a test context, then the
// expression has a ToBoolean stub and we want to collect the type
@@ -447,31 +441,6 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
-bool BinaryOperation::ResultOverwriteAllowed() const {
- switch (op()) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- return false;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- return true;
- default:
- UNREACHABLE();
- }
- return false;
-}
-
-
static bool IsTypeof(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
@@ -576,15 +545,28 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
-bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
+bool Call::IsUsingCallFeedbackICSlot(Isolate* isolate) const {
CallType call_type = GetCallType(isolate);
- return (call_type != POSSIBLY_EVAL_CALL);
+ if (IsUsingCallFeedbackSlot(isolate) || call_type == POSSIBLY_EVAL_CALL) {
+ return false;
+ }
+ return true;
+}
+
+
+bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
+ // SuperConstructorCall uses a CallConstructStub, which wants
+ // a Slot, not an IC slot.
+ return GetCallType(isolate) == SUPER_CALL;
}
FeedbackVectorRequirements Call::ComputeFeedbackRequirements(Isolate* isolate) {
- int ic_slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
- return FeedbackVectorRequirements(0, ic_slots);
+ int ic_slots = IsUsingCallFeedbackICSlot(isolate) ? 1 : 0;
+ int slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
+ // A Call uses either a slot or an IC slot.
+ DCHECK((ic_slots & slots) == 0);
+ return FeedbackVectorRequirements(slots, ic_slots);
}
@@ -640,7 +622,8 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeFeedbackId id = key()->LiteralFeedbackId();
+ DCHECK(!is_computed_name());
+ TypeFeedbackId id = key()->AsLiteral()->LiteralFeedbackId();
SmallMapList maps;
oracle->CollectReceiverTypes(id, &maps);
receiver_type_ = maps.length() == 1 ? maps.at(0)
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 43bde6ad25..faccb90457 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -11,10 +11,10 @@
#include "src/ast-value-factory.h"
#include "src/bailout-reason.h"
#include "src/factory.h"
-#include "src/interface.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/list-inl.h"
+#include "src/modules.h"
#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
#include "src/smart-pointers.h"
@@ -22,7 +22,6 @@
#include "src/types.h"
#include "src/utils.h"
#include "src/variables.h"
-#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -49,7 +48,6 @@ namespace internal {
#define MODULE_NODE_LIST(V) \
V(ModuleLiteral) \
- V(ModuleVariable) \
V(ModulePath) \
V(ModuleUrl)
@@ -114,7 +112,6 @@ class Expression;
class IterationStatement;
class MaterializedLiteral;
class Statement;
-class TargetCollector;
class TypeFeedbackOracle;
class RegExpAlternative;
@@ -202,9 +199,7 @@ class AstNode: public ZoneObject {
};
#undef DECLARE_TYPE_ENUM
- void* operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
- }
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
explicit AstNode(int position): position_(position) {}
virtual ~AstNode() {}
@@ -225,7 +220,6 @@ class AstNode: public ZoneObject {
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
- virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
@@ -340,10 +334,6 @@ class Expression : public AstNode {
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() const { return false; }
- // True iff the result can be safely overwritten (to avoid allocation).
- // False for operations that can return one of their operands.
- virtual bool ResultOverwriteAllowed() const { return false; }
-
// True iff the expression is a literal represented as a smi.
bool IsSmiLiteral() const;
@@ -587,8 +577,7 @@ class FunctionDeclaration FINAL : public Declaration {
int pos)
: Declaration(zone, proxy, mode, scope, pos),
fun_(fun) {
- // At the moment there are no "const functions" in JavaScript...
- DCHECK(mode == VAR || mode == LET);
+ DCHECK(mode == VAR || mode == LET || mode == CONST);
DCHECK(fun != NULL);
}
@@ -607,14 +596,9 @@ class ModuleDeclaration FINAL : public Declaration {
}
protected:
- ModuleDeclaration(Zone* zone,
- VariableProxy* proxy,
- Module* module,
- Scope* scope,
- int pos)
- : Declaration(zone, proxy, MODULE, scope, pos),
- module_(module) {
- }
+ ModuleDeclaration(Zone* zone, VariableProxy* proxy, Module* module,
+ Scope* scope, int pos)
+ : Declaration(zone, proxy, CONST, scope, pos), module_(module) {}
private:
Module* module_;
@@ -661,21 +645,17 @@ class ExportDeclaration FINAL : public Declaration {
class Module : public AstNode {
public:
- Interface* interface() const { return interface_; }
+ ModuleDescriptor* descriptor() const { return descriptor_; }
Block* body() const { return body_; }
protected:
Module(Zone* zone, int pos)
- : AstNode(pos),
- interface_(Interface::NewModule(zone)),
- body_(NULL) {}
- Module(Zone* zone, Interface* interface, int pos, Block* body = NULL)
- : AstNode(pos),
- interface_(interface),
- body_(body) {}
+ : AstNode(pos), descriptor_(ModuleDescriptor::New(zone)), body_(NULL) {}
+ Module(Zone* zone, ModuleDescriptor* descriptor, int pos, Block* body = NULL)
+ : AstNode(pos), descriptor_(descriptor), body_(body) {}
private:
- Interface* interface_;
+ ModuleDescriptor* descriptor_;
Block* body_;
};
@@ -685,22 +665,8 @@ class ModuleLiteral FINAL : public Module {
DECLARE_NODE_TYPE(ModuleLiteral)
protected:
- ModuleLiteral(Zone* zone, Block* body, Interface* interface, int pos)
- : Module(zone, interface, pos, body) {}
-};
-
-
-class ModuleVariable FINAL : public Module {
- public:
- DECLARE_NODE_TYPE(ModuleVariable)
-
- VariableProxy* proxy() const { return proxy_; }
-
- protected:
- inline ModuleVariable(Zone* zone, VariableProxy* proxy, int pos);
-
- private:
- VariableProxy* proxy_;
+ ModuleLiteral(Zone* zone, Block* body, ModuleDescriptor* descriptor, int pos)
+ : Module(zone, descriptor, pos, body) {}
};
@@ -741,18 +707,13 @@ class ModuleStatement FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ModuleStatement)
- VariableProxy* proxy() const { return proxy_; }
Block* body() const { return body_; }
protected:
- ModuleStatement(Zone* zone, VariableProxy* proxy, Block* body, int pos)
- : Statement(zone, pos),
- proxy_(proxy),
- body_(body) {
- }
+ ModuleStatement(Zone* zone, Block* body, int pos)
+ : Statement(zone, pos), body_(body) {}
private:
- VariableProxy* proxy_;
Block* body_;
};
@@ -935,11 +896,12 @@ class ForInStatement FINAL : public ForEachStatement {
ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; }
- static int num_ids() { return parent_num_ids() + 4; }
+ static int num_ids() { return parent_num_ids() + 5; }
BailoutId BodyId() const { return BailoutId(local_id(0)); }
BailoutId PrepareId() const { return BailoutId(local_id(1)); }
BailoutId EnumId() const { return BailoutId(local_id(2)); }
BailoutId ToObjectId() const { return BailoutId(local_id(3)); }
+ BailoutId AssignmentId() const { return BailoutId(local_id(4)); }
BailoutId ContinueId() const OVERRIDE { return EntryId(); }
BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
@@ -1104,19 +1066,32 @@ class WithStatement FINAL : public Statement {
Expression* expression() const { return expression_; }
Statement* statement() const { return statement_; }
+ void set_base_id(int id) { base_id_ = id; }
+ static int num_ids() { return parent_num_ids() + 1; }
+ BailoutId EntryId() const { return BailoutId(local_id(0)); }
+
protected:
- WithStatement(
- Zone* zone, Scope* scope,
- Expression* expression, Statement* statement, int pos)
+ WithStatement(Zone* zone, Scope* scope, Expression* expression,
+ Statement* statement, int pos)
: Statement(zone, pos),
scope_(scope),
expression_(expression),
- statement_(statement) { }
+ statement_(statement),
+ base_id_(BailoutId::None().ToInt()) {}
+ static int parent_num_ids() { return 0; }
+
+ int base_id() const {
+ DCHECK(!BailoutId(base_id_).IsNone());
+ return base_id_;
+ }
private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
Scope* scope_;
Expression* expression_;
Statement* statement_;
+ int base_id_;
};
@@ -1230,53 +1205,20 @@ class IfStatement FINAL : public Statement {
};
-// NOTE: TargetCollectors are represented as nodes to fit in the target
-// stack in the compiler; this should probably be reworked.
-class TargetCollector FINAL : public AstNode {
- public:
- explicit TargetCollector(Zone* zone)
- : AstNode(RelocInfo::kNoPosition), targets_(0, zone) { }
-
- // Adds a jump target to the collector. The collector stores a pointer not
- // a copy of the target to make binding work, so make sure not to pass in
- // references to something on the stack.
- void AddTarget(Label* target, Zone* zone);
-
- // Virtual behaviour. TargetCollectors are never part of the AST.
- void Accept(AstVisitor* v) OVERRIDE { UNREACHABLE(); }
- NodeType node_type() const OVERRIDE { return kInvalid; }
- TargetCollector* AsTargetCollector() OVERRIDE { return this; }
-
- ZoneList<Label*>* targets() { return &targets_; }
-
- private:
- ZoneList<Label*> targets_;
-};
-
-
class TryStatement : public Statement {
public:
- void set_escaping_targets(ZoneList<Label*>* targets) {
- escaping_targets_ = targets;
- }
-
int index() const { return index_; }
Block* try_block() const { return try_block_; }
- ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
protected:
TryStatement(Zone* zone, int index, Block* try_block, int pos)
- : Statement(zone, pos),
- index_(index),
- try_block_(try_block),
- escaping_targets_(NULL) { }
+ : Statement(zone, pos), index_(index), try_block_(try_block) {}
private:
// Unique (per-function) index of this handler. This is not an AST ID.
int index_;
Block* try_block_;
- ZoneList<Label*>* escaping_targets_;
};
@@ -1467,10 +1409,7 @@ class ObjectLiteralProperty FINAL : public ZoneObject {
PROTOTYPE // Property is __proto__.
};
- ObjectLiteralProperty(Zone* zone, AstValueFactory* ast_value_factory,
- Literal* key, Expression* value, bool is_static);
-
- Literal* key() { return key_; }
+ Expression* key() { return key_; }
Expression* value() { return value_; }
Kind kind() { return kind_; }
@@ -1485,20 +1424,24 @@ class ObjectLiteralProperty FINAL : public ZoneObject {
bool emit_store();
bool is_static() const { return is_static_; }
+ bool is_computed_name() const { return is_computed_name_; }
protected:
friend class AstNodeFactory;
- ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value,
- bool is_static);
- void set_key(Literal* key) { key_ = key; }
+ ObjectLiteralProperty(Expression* key, Expression* value, Kind kind,
+ bool is_static, bool is_computed_name);
+ ObjectLiteralProperty(AstValueFactory* ast_value_factory, Expression* key,
+ Expression* value, bool is_static,
+ bool is_computed_name);
private:
- Literal* key_;
+ Expression* key_;
Expression* value_;
Kind kind_;
bool emit_store_;
bool is_static_;
+ bool is_computed_name_;
Handle<Map> receiver_type_;
};
@@ -1551,7 +1494,12 @@ class ObjectLiteral FINAL : public MaterializedLiteral {
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
- static int num_ids() { return parent_num_ids() + 1; }
+ // Return an AST id for a property that is used in simulate instructions.
+ BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 1)); }
+
+ // Unlike other AST nodes, this number of bailout IDs allocated for an
+ // ObjectLiteral can vary, so num_ids() is not a static method.
+ int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
@@ -1607,12 +1555,14 @@ class ArrayLiteral FINAL : public MaterializedLiteral {
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
- // Unlike other AST nodes, this number of bailout IDs allocated for an
- // ArrayLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + values()->length(); }
+ BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
// Return an AST id for an element that is used in simulate instructions.
- BailoutId GetIdForElement(int i) { return BailoutId(local_id(i)); }
+ BailoutId GetIdForElement(int i) { return BailoutId(local_id(i + 1)); }
+
+ // Unlike other AST nodes, this number of bailout IDs allocated for an
+ // ArrayLiteral can vary, so num_ids() is not a static method.
+ int num_ids() const { return parent_num_ids() + 1 + values()->length(); }
// Populate the constant elements fixed array.
void BuildConstantElements(Isolate* isolate);
@@ -1681,9 +1631,7 @@ class VariableProxy FINAL : public Expression {
bit_field_ = IsResolvedField::update(bit_field_, true);
}
- Interface* interface() const { return interface_; }
-
- // Bind this proxy to the variable var. Interfaces must match.
+ // Bind this proxy to the variable var.
void BindTo(Variable* var);
bool UsesVariableFeedbackSlot() const {
@@ -1708,7 +1656,7 @@ class VariableProxy FINAL : public Expression {
VariableProxy(Zone* zone, Variable* var, int position);
VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
- Interface* interface, int position);
+ int position);
class IsThisField : public BitField8<bool, 0, 1> {};
class IsAssignedField : public BitField8<bool, 1, 1> {};
@@ -1722,7 +1670,6 @@ class VariableProxy FINAL : public Expression {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
};
- Interface* interface_;
};
@@ -1827,14 +1774,21 @@ class Call FINAL : public Expression {
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE;
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
- call_feedback_slot_ = slot;
+ ic_slot_or_slot_ = slot.ToInt();
+ }
+ void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
+ ic_slot_or_slot_ = slot.ToInt();
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::CALL_IC; }
- bool HasCallFeedbackSlot() const { return !call_feedback_slot_.IsInvalid(); }
- FeedbackVectorICSlot CallFeedbackSlot() const {
- DCHECK(!call_feedback_slot_.IsInvalid());
- return call_feedback_slot_;
+ FeedbackVectorSlot CallFeedbackSlot() const {
+ DCHECK(ic_slot_or_slot_ != FeedbackVectorSlot::Invalid().ToInt());
+ return FeedbackVectorSlot(ic_slot_or_slot_);
+ }
+
+ FeedbackVectorICSlot CallFeedbackICSlot() const {
+ DCHECK(ic_slot_or_slot_ != FeedbackVectorICSlot::Invalid().ToInt());
+ return FeedbackVectorICSlot(ic_slot_or_slot_);
}
SmallMapList* GetReceiverTypes() OVERRIDE {
@@ -1895,6 +1849,7 @@ class Call FINAL : public Expression {
// Helpers to determine how to handle the call.
CallType GetCallType(Isolate* isolate) const;
bool IsUsingCallFeedbackSlot(Isolate* isolate) const;
+ bool IsUsingCallFeedbackICSlot(Isolate* isolate) const;
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
@@ -1905,7 +1860,7 @@ class Call FINAL : public Expression {
Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
int pos)
: Expression(zone, pos),
- call_feedback_slot_(FeedbackVectorICSlot::Invalid()),
+ ic_slot_or_slot_(FeedbackVectorICSlot::Invalid().ToInt()),
expression_(expression),
arguments_(arguments),
bit_field_(IsUninitializedField::encode(false)) {
@@ -1918,7 +1873,9 @@ class Call FINAL : public Expression {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- FeedbackVectorICSlot call_feedback_slot_;
+ // We store this as an integer because we don't know if we have a slot or
+ // an ic slot until scoping time.
+ int ic_slot_or_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@@ -2082,8 +2039,6 @@ class BinaryOperation FINAL : public Expression {
public:
DECLARE_NODE_TYPE(BinaryOperation)
- bool ResultOverwriteAllowed() const OVERRIDE;
-
Token::Value op() const { return static_cast<Token::Value>(op_); }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
@@ -2170,13 +2125,14 @@ class CountOperation FINAL : public Expression {
}
void set_type(Type* type) { type_ = type; }
- static int num_ids() { return parent_num_ids() + 3; }
+ static int num_ids() { return parent_num_ids() + 4; }
BailoutId AssignmentId() const { return BailoutId(local_id(0)); }
+ BailoutId ToNumberId() const { return BailoutId(local_id(1)); }
TypeFeedbackId CountBinOpFeedbackId() const {
- return TypeFeedbackId(local_id(1));
+ return TypeFeedbackId(local_id(2));
}
TypeFeedbackId CountStoreFeedbackId() const {
- return TypeFeedbackId(local_id(2));
+ return TypeFeedbackId(local_id(3));
}
protected:
@@ -2479,9 +2435,8 @@ class FunctionLiteral FINAL : public Expression {
int SourceSize() const { return end_position() - start_position(); }
bool is_expression() const { return IsExpression::decode(bitfield_); }
bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
- StrictMode strict_mode() const;
+ LanguageMode language_mode() const;
bool uses_super_property() const;
- bool uses_super_constructor_call() const;
static bool NeedsHomeObject(Expression* literal) {
return literal != NULL && literal->IsFunctionLiteral() &&
@@ -2557,18 +2512,6 @@ class FunctionLiteral FINAL : public Expression {
}
FunctionKind kind() { return FunctionKindBits::decode(bitfield_); }
- bool is_arrow() {
- return IsArrowFunction(FunctionKindBits::decode(bitfield_));
- }
- bool is_generator() {
- return IsGeneratorFunction(FunctionKindBits::decode(bitfield_));
- }
- bool is_concise_method() {
- return IsConciseMethod(FunctionKindBits::decode(bitfield_));
- }
- bool is_default_constructor() {
- return IsDefaultConstructor(FunctionKindBits::decode(bitfield_));
- }
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); }
@@ -2639,7 +2582,7 @@ class FunctionLiteral FINAL : public Expression {
class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
- class FunctionKindBits : public BitField<FunctionKind, 6, 4> {};
+ class FunctionKindBits : public BitField<FunctionKind, 6, 7> {};
};
@@ -2654,15 +2597,26 @@ class ClassLiteral FINAL : public Expression {
Scope* scope() const { return scope_; }
VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
Expression* extends() const { return extends_; }
- Expression* constructor() const { return constructor_; }
+ FunctionLiteral* constructor() const { return constructor_; }
ZoneList<Property*>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
+ BailoutId EntryId() const { return BailoutId(local_id(0)); }
+ BailoutId DeclsId() const { return BailoutId(local_id(1)); }
+ BailoutId ExitId() { return BailoutId(local_id(2)); }
+
+ // Return an AST id for a property that is used in simulate instructions.
+ BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 3)); }
+
+ // Unlike other AST nodes, this number of bailout IDs allocated for an
+ // ClassLiteral can vary, so num_ids() is not a static method.
+ int num_ids() const { return parent_num_ids() + 3 + properties()->length(); }
+
protected:
ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
VariableProxy* class_variable_proxy, Expression* extends,
- Expression* constructor, ZoneList<Property*>* properties,
+ FunctionLiteral* constructor, ZoneList<Property*>* properties,
int start_position, int end_position)
: Expression(zone, start_position),
raw_name_(name),
@@ -2672,13 +2626,16 @@ class ClassLiteral FINAL : public Expression {
constructor_(constructor),
properties_(properties),
end_position_(end_position) {}
+ static int parent_num_ids() { return Expression::num_ids(); }
private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
const AstRawString* raw_name_;
Scope* scope_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
- Expression* constructor_;
+ FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
int end_position_;
};
@@ -3110,15 +3067,6 @@ class RegExpEmpty FINAL : public RegExpTree {
// ----------------------------------------------------------------------------
-// Out-of-line inline constructors (to side-step cyclic dependencies).
-
-inline ModuleVariable::ModuleVariable(Zone* zone, VariableProxy* proxy, int pos)
- : Module(zone, proxy->interface(), pos),
- proxy_(proxy) {
-}
-
-
-// ----------------------------------------------------------------------------
// Basic visitor
// - leaf node visitors are abstract.
@@ -3155,19 +3103,22 @@ class AstVisitor BASE_EMBEDDED {
\
bool CheckStackOverflow() { \
if (stack_overflow_) return true; \
- StackLimitCheck check(zone_->isolate()); \
+ StackLimitCheck check(isolate_); \
if (!check.HasOverflowed()) return false; \
- return (stack_overflow_ = true); \
+ stack_overflow_ = true; \
+ return true; \
} \
\
private: \
- void InitializeAstVisitor(Zone* zone) { \
+ void InitializeAstVisitor(Isolate* isolate, Zone* zone) { \
+ isolate_ = isolate; \
zone_ = zone; \
stack_overflow_ = false; \
} \
Zone* zone() { return zone_; } \
- Isolate* isolate() { return zone_->isolate(); } \
+ Isolate* isolate() { return isolate_; } \
\
+ Isolate* isolate_; \
Zone* zone_; \
bool stack_overflow_
@@ -3216,12 +3167,9 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
return new (zone_) ExportDeclaration(zone_, proxy, scope, pos);
}
- ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) {
- return new (zone_) ModuleLiteral(zone_, body, interface, pos);
- }
-
- ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) {
- return new (zone_) ModuleVariable(zone_, proxy, pos);
+ ModuleLiteral* NewModuleLiteral(Block* body, ModuleDescriptor* descriptor,
+ int pos) {
+ return new (zone_) ModuleLiteral(zone_, body, descriptor, pos);
}
ModulePath* NewModulePath(Module* origin, const AstRawString* name, int pos) {
@@ -3265,9 +3213,8 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
return NULL;
}
- ModuleStatement* NewModuleStatement(
- VariableProxy* proxy, Block* body, int pos) {
- return new (zone_) ModuleStatement(zone_, proxy, body, pos);
+ ModuleStatement* NewModuleStatement(Block* body, int pos) {
+ return new (zone_) ModuleStatement(zone_, body, pos);
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
@@ -3377,20 +3324,19 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
boilerplate_properties, has_function, pos);
}
- ObjectLiteral::Property* NewObjectLiteralProperty(Literal* key,
- Expression* value,
- bool is_static) {
- return new (zone_) ObjectLiteral::Property(zone_, ast_value_factory_, key,
- value, is_static);
+ ObjectLiteral::Property* NewObjectLiteralProperty(
+ Expression* key, Expression* value, ObjectLiteralProperty::Kind kind,
+ bool is_static, bool is_computed_name) {
+ return new (zone_)
+ ObjectLiteral::Property(key, value, kind, is_static, is_computed_name);
}
- ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
- FunctionLiteral* value,
- int pos, bool is_static) {
- ObjectLiteral::Property* prop =
- new (zone_) ObjectLiteral::Property(zone_, is_getter, value, is_static);
- prop->set_key(NewStringLiteral(value->raw_name(), pos));
- return prop;
+ ObjectLiteral::Property* NewObjectLiteralProperty(Expression* key,
+ Expression* value,
+ bool is_static,
+ bool is_computed_name) {
+ return new (zone_) ObjectLiteral::Property(ast_value_factory_, key, value,
+ is_static, is_computed_name);
}
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern,
@@ -3413,9 +3359,8 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
VariableProxy* NewVariableProxy(const AstRawString* name,
bool is_this,
- Interface* interface = Interface::NewValue(),
int position = RelocInfo::kNoPosition) {
- return new (zone_) VariableProxy(zone_, name, is_this, interface, position);
+ return new (zone_) VariableProxy(zone_, name, is_this, position);
}
Property* NewProperty(Expression* obj, Expression* key, int pos) {
@@ -3521,7 +3466,7 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
ClassLiteral* NewClassLiteral(const AstRawString* name, Scope* scope,
VariableProxy* proxy, Expression* extends,
- Expression* constructor,
+ FunctionLiteral* constructor,
ZoneList<ObjectLiteral::Property*>* properties,
int start_position, int end_position) {
return new (zone_)
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index c7602a7def..cb31cc9982 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -25,6 +25,11 @@ BackgroundParsingTask::BackgroundParsingTask(
options == ScriptCompiler::kNoCompileOptions);
source->allow_lazy =
!i::Compiler::DebuggerWantsEagerCompilation(source->info.get());
+
+ if (!source->allow_lazy && options_ == ScriptCompiler::kProduceParserCache) {
+ // Producing cached data while parsing eagerly is not supported.
+ options_ = ScriptCompiler::kNoCompileOptions;
+ }
source->hash_seed = isolate->heap()->HashSeed();
}
@@ -40,15 +45,16 @@ void BackgroundParsingTask::Run() {
source_->info->SetCachedData(&script_data, options_);
}
- uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - stack_size_ * KB;
- Parser::ParseInfo parse_info = {limit, source_->hash_seed,
- &source_->unicode_cache};
+ uintptr_t stack_limit =
+ reinterpret_cast<uintptr_t>(&stack_limit) - stack_size_ * KB;
// Parser needs to stay alive for finalizing the parsing on the main
// thread. Passing &parse_info is OK because Parser doesn't store it.
- source_->parser.Reset(new Parser(source_->info.get(), &parse_info));
+ source_->parser.Reset(new Parser(source_->info.get(), stack_limit,
+ source_->hash_seed,
+ &source_->unicode_cache));
source_->parser->set_allow_lazy(source_->allow_lazy);
- source_->parser->ParseOnBackground();
+ source_->parser->ParseOnBackground(source_->info.get());
if (script_data != NULL) {
source_->cached_data.Reset(new ScriptCompiler::CachedData(
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 7287d629d2..8bfe7a9383 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -8,6 +8,7 @@
namespace v8 {
namespace internal {
+// TODO(svenpanne) introduce an AbortReason and partition this list
#define ERROR_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
@@ -39,6 +40,7 @@ namespace internal {
"BinaryStub_GenerateFloatingPointCode") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
+ V(kBuiltinFunctionCannotBeOptimized, "Builtin function cannot be optimized") \
V(kCallToAJavaScriptRuntimeFunction, \
"Call to a JavaScript runtime function") \
V(kCannotTranslatePositionInChangedArea, \
@@ -47,6 +49,7 @@ namespace internal {
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
+ V(kComputedPropertyName, "Computed property name") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
V(kCopyBuffersOverlap, "Copy buffers overlap") \
V(kCouldNotGenerateZero, "Could not generate +0.0") \
@@ -217,6 +220,7 @@ namespace internal {
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
@@ -226,12 +230,18 @@ namespace internal {
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
+ V(kTheInstructionShouldBeALis, "The instruction should be a lis") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
+ V(kTheInstructionShouldBeAnOris, "The instruction should be an oris") \
+ V(kTheInstructionShouldBeALi, "The instruction should be a li") \
+ V(kTheInstructionShouldBeASldi, "The instruction should be a sldi") \
V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \
"The instruction to patch should be a load from the constant pool") \
V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
"The instruction to patch should be a ldr literal") \
+ V(kTheInstructionToPatchShouldBeALis, \
+ "The instruction to patch should be a lis") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
@@ -301,7 +311,6 @@ namespace internal {
"Unexpected unused properties of string wrapper") \
V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
- V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
"Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
@@ -322,6 +331,9 @@ namespace internal {
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
+ V(kShouldNotDirectlyEnterOsrFunction, \
+ "Should not directly enter OSR-compiled function") \
+ V(kOsrCompileFailed, "OSR compilation failed") \
V(kYield, "Yield")
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 675e43fad0..e76b3d02d2 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -148,6 +148,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "src/base/atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_PPC
+#include "src/base/atomicops_internals_ppc_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
@@ -160,7 +162,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
-#if defined(__APPLE__) || defined(__OpenBSD__)
+#if defined(__APPLE__) || defined(__OpenBSD__) || defined(V8_OS_AIX)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif
diff --git a/deps/v8/src/base/atomicops_internals_ppc_gcc.h b/deps/v8/src/base/atomicops_internals_ppc_gcc.h
new file mode 100644
index 0000000000..daa27b4693
--- /dev/null
+++ b/deps/v8/src/base/atomicops_internals_ppc_gcc.h
@@ -0,0 +1,168 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ for (;;) {
+ Atomic32 old_value = *ptr;
+ Atomic32 new_value = old_value + increment;
+ if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+ return new_value;
+ // The exchange took place as expected.
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory"); }
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#ifdef V8_TARGET_ARCH_PPC64
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ for (;;) {
+ Atomic64 old_value = *ptr;
+ Atomic64 new_value = old_value + increment;
+ if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+ return new_value;
+ // The exchange took place as expected.
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif
+}
+} // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index f52877657c..661bf80e6e 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -48,6 +48,13 @@
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__PPC__) || defined(_ARCH_PPC)
+#define V8_HOST_ARCH_PPC 1
+#if defined(__PPC64__) || defined(_ARCH_PPC64)
+#define V8_HOST_ARCH_64_BIT 1
+#else
+#define V8_HOST_ARCH_32_BIT 1
+#endif
#else
#error "Host architecture was not detected as supported by v8"
#endif
@@ -65,9 +72,9 @@
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -104,6 +111,12 @@
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_PPC64
+#define V8_TARGET_ARCH_64_BIT 1
+#else
+#define V8_TARGET_ARCH_32_BIT 1
+#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
@@ -154,6 +167,10 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_PPC_LE
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_PPC_BE
+#define V8_TARGET_BIG_ENDIAN 1
#else
#error Unknown target architecture endianness
#endif
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 9755fc13ce..ac1eb55fdd 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -51,4 +51,27 @@
#define WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
+
+// The C++ standard requires that static const members have an out-of-class
+// definition (in a single compilation unit), but MSVC chokes on this (when
+// language extensions, which are required, are enabled). (You're only likely to
+// notice the need for a definition if you take the address of the member or,
+// more commonly, pass it to a function that takes it as a reference argument --
+// probably an STL function.) This macro makes MSVC do the right thing. See
+// http://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx for more
+// information. Use like:
+//
+// In .h file:
+// struct Foo {
+// static const int kBar = 5;
+// };
+//
+// In .cc file:
+// STATIC_CONST_MEMBER_DEFINITION const int Foo::kBar;
+#if V8_HAS_DECLSPEC_SELECTANY
+#define STATIC_CONST_MEMBER_DEFINITION __declspec(selectany)
+#else
+#define STATIC_CONST_MEMBER_DEFINITION
+#endif
+
#endif // V8_BASE_COMPILER_SPECIFIC_H_
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index daf3302365..84cd231f61 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -16,6 +16,15 @@
#if V8_OS_QNX
#include <sys/syspage.h> // cpuinfo
#endif
+#if V8_OS_LINUX && V8_HOST_ARCH_PPC
+#include <elf.h>
+#endif
+#if V8_OS_AIX
+#include <sys/systemcfg.h> // _system_configuration
+#ifndef POWER_8
+#define POWER_8 0x10000
+#endif
+#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
#endif
@@ -312,6 +321,8 @@ CPU::CPU()
has_ssse3_(false),
has_sse41_(false),
has_sse42_(false),
+ is_atom_(false),
+ has_osxsave_(false),
has_avx_(false),
has_fma3_(false),
has_idiva_(false),
@@ -360,8 +371,23 @@ CPU::CPU()
has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+ has_osxsave_ = (cpu_info[2] & 0x08000000) != 0;
has_avx_ = (cpu_info[2] & 0x10000000) != 0;
- if (has_avx_) has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
+ has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
+
+ if (family_ == 0x6) {
+ switch (model_) {
+ case 0x1c: // SLT
+ case 0x26:
+ case 0x36:
+ case 0x27:
+ case 0x35:
+ case 0x37: // SLM
+ case 0x4a:
+ case 0x4d:
+ is_atom_ = true;
+ }
+ }
}
#if V8_HOST_ARCH_IA32
@@ -589,7 +615,69 @@ CPU::CPU()
delete[] part;
}
+#elif V8_HOST_ARCH_PPC
+
+#ifndef USE_SIMULATOR
+#if V8_OS_LINUX
+ // Read processor info from /proc/self/auxv.
+ char* auxv_cpu_type = NULL;
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != NULL) {
+#if V8_TARGET_ARCH_PPC64
+ Elf64_auxv_t entry;
+#else
+ Elf32_auxv_t entry;
#endif
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || entry.a_type == AT_NULL) {
+ break;
+ }
+ if (entry.a_type == AT_PLATFORM) {
+ auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
+ break;
+ }
+ }
+ fclose(fp);
+ }
+
+ part_ = -1;
+ if (auxv_cpu_type) {
+ if (strcmp(auxv_cpu_type, "power8") == 0) {
+ part_ = PPC_POWER8;
+ } else if (strcmp(auxv_cpu_type, "power7") == 0) {
+ part_ = PPC_POWER7;
+ } else if (strcmp(auxv_cpu_type, "power6") == 0) {
+ part_ = PPC_POWER6;
+ } else if (strcmp(auxv_cpu_type, "power5") == 0) {
+ part_ = PPC_POWER5;
+ } else if (strcmp(auxv_cpu_type, "ppc970") == 0) {
+ part_ = PPC_G5;
+ } else if (strcmp(auxv_cpu_type, "ppc7450") == 0) {
+ part_ = PPC_G4;
+ } else if (strcmp(auxv_cpu_type, "pa6t") == 0) {
+ part_ = PPC_PA6T;
+ }
+ }
+
+#elif V8_OS_AIX
+ switch (_system_configuration.implementation) {
+ case POWER_8:
+ part_ = PPC_POWER8;
+ break;
+ case POWER_7:
+ part_ = PPC_POWER7;
+ break;
+ case POWER_6:
+ part_ = PPC_POWER6;
+ break;
+ case POWER_5:
+ part_ = PPC_POWER5;
+ break;
+ }
+#endif // V8_OS_AIX
+#endif // !USE_SIMULATOR
+#endif // V8_HOST_ARCH_PPC
}
} } // namespace v8::base
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 8c41f9d77a..e8a6c8ea26 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -50,6 +50,8 @@ class CPU FINAL {
int variant() const { return variant_; }
static const int NVIDIA_DENVER = 0x0;
int part() const { return part_; }
+
+ // ARM-specific part codes
static const int ARM_CORTEX_A5 = 0xc05;
static const int ARM_CORTEX_A7 = 0xc07;
static const int ARM_CORTEX_A8 = 0xc08;
@@ -57,6 +59,17 @@ class CPU FINAL {
static const int ARM_CORTEX_A12 = 0xc0c;
static const int ARM_CORTEX_A15 = 0xc0f;
+ // PPC-specific part codes
+ enum {
+ PPC_POWER5,
+ PPC_POWER6,
+ PPC_POWER7,
+ PPC_POWER8,
+ PPC_G4,
+ PPC_G5,
+ PPC_PA6T
+ };
+
// General features
bool has_fpu() const { return has_fpu_; }
@@ -70,8 +83,10 @@ class CPU FINAL {
bool has_ssse3() const { return has_ssse3_; }
bool has_sse41() const { return has_sse41_; }
bool has_sse42() const { return has_sse42_; }
+ bool has_osxsave() const { return has_osxsave_; }
bool has_avx() const { return has_avx_; }
bool has_fma3() const { return has_fma3_; }
+ bool is_atom() const { return is_atom_; }
// arm features
bool has_idiva() const { return has_idiva_; }
@@ -106,6 +121,8 @@ class CPU FINAL {
bool has_ssse3_;
bool has_sse41_;
bool has_sse42_;
+ bool is_atom_;
+ bool has_osxsave_;
bool has_avx_;
bool has_fma3_;
bool has_idiva_;
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index c3f609f980..25d77bb1ec 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -10,14 +10,45 @@
#elif V8_OS_QNX
# include <backtrace.h>
#endif // V8_LIBC_GLIBC || V8_OS_BSD
-#include <stdio.h>
-#include <stdlib.h>
+
+#include <cstdio>
+#include <cstdlib>
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
+// Explicit instantiations for commonly used comparisons.
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+ template std::string* MakeCheckOpString<type, type>( \
+ type const&, type const&, char const*);
+DEFINE_MAKE_CHECK_OP_STRING(int)
+DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(unsigned int)
+DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(char const*)
+DEFINE_MAKE_CHECK_OP_STRING(void const*)
+#undef DEFINE_MAKE_CHECK_OP_STRING
+
+
+// Explicit instantiations for floating point checks.
+#define DEFINE_CHECK_OP_IMPL(NAME) \
+ template std::string* Check##NAME##Impl<float, float>( \
+ float const& lhs, float const& rhs, char const* msg); \
+ template std::string* Check##NAME##Impl<double, double>( \
+ double const& lhs, double const& rhs, char const* msg);
+DEFINE_CHECK_OP_IMPL(EQ)
+DEFINE_CHECK_OP_IMPL(NE)
+DEFINE_CHECK_OP_IMPL(LE)
+DEFINE_CHECK_OP_IMPL(LT)
+DEFINE_CHECK_OP_IMPL(GE)
+DEFINE_CHECK_OP_IMPL(GT)
+#undef DEFINE_CHECK_OP_IMPL
+
+
// Attempts to dump a backtrace (if supported).
void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
@@ -68,7 +99,8 @@ void DumpBacktrace() {
#endif // V8_LIBC_GLIBC || V8_OS_BSD
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
// Contains protection against recursive calls (faults while handling faults).
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 83c1bb6013..511ebf1e9c 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -5,8 +5,9 @@
#ifndef V8_BASE_LOGGING_H_
#define V8_BASE_LOGGING_H_
-#include <stdint.h>
-#include <string.h>
+#include <cstring>
+#include <sstream>
+#include <string>
#include "src/base/build_config.h"
@@ -31,193 +32,143 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#endif
-// The CHECK macro checks that the given condition is true; if not, it
-// prints a message to stderr and aborts.
-#define CHECK(condition) do { \
- if (!(condition)) { \
- V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
- } \
- } while (0)
-
-
-// Helper function used by the CHECK_EQ function when given int
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source, int expected,
- const char* value_source, int value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i",
- expected_source, value_source, expected, value);
- }
-}
-
-
-// Helper function used by the CHECK_EQ function when given int64_t
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source,
- int64_t expected,
- const char* value_source,
- int64_t value) {
- if (expected != value) {
- // Print int64_t values in hex, as two int32s,
- // to avoid platform-dependencies.
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n#"
- " Expected: 0x%08x%08x\n# Found: 0x%08x%08x",
- expected_source, value_source,
- static_cast<uint32_t>(expected >> 32),
- static_cast<uint32_t>(expected),
- static_cast<uint32_t>(value >> 32),
- static_cast<uint32_t>(value));
- }
-}
-
-
-// Helper function used by the CHECK_NE function when given int
-// arguments. Should not be called directly.
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- int unexpected,
- const char* value_source,
- int value) {
- if (unexpected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
- unexpected_source, value_source, value);
- }
-}
+namespace v8 {
+namespace base {
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by DEBUG, so the check will be executed regardless of
+// compilation mode.
+//
+// We make sure CHECK et al. always evaluates their arguments, as
+// doing CHECK(FunctionWithSideEffect()) is a common idiom.
+#define CHECK(condition) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \
+ } \
+ } while (0)
-// Helper function used by the CHECK function when given string
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if ((expected == NULL && value != NULL) ||
- (expected != NULL && value == NULL) ||
- (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
- expected_source, value_source, expected, value);
- }
-}
+#ifdef DEBUG
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if (expected == value ||
- (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
- expected_source, value_source, value);
- }
-}
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use CHECK_EQ et al below.
+#define CHECK_OP(name, op, lhs, rhs) \
+ do { \
+ if (std::string* _msg = ::v8::base::Check##name##Impl( \
+ (lhs), (rhs), #lhs " " #op " " #rhs)) { \
+ V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
+ delete _msg; \
+ } \
+ } while (0)
+#else
-// Helper function used by the CHECK function when given pointer
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
- expected_source, value_source,
- expected, value);
- }
-}
+// Make all CHECK functions discard their log strings to reduce code
+// bloat for official release builds.
+#define CHECK_OP(name, op, lhs, rhs) CHECK((lhs)op(rhs))
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
- expected_source, value_source, value);
- }
-}
+#endif
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- int64_t expected,
- const char* value_source,
- int64_t value) {
- if (expected == value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
- expected_source, value_source, expected, value);
- }
+// Build the error message string. This is separate from the "Impl"
+// function template because it is not performance critical and so can
+// be out of line, while the "Impl" code should be inline. Caller
+// takes ownership of the returned string.
+template <typename Lhs, typename Rhs>
+std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
+ char const* msg) {
+ std::ostringstream ss;
+ ss << msg << " (" << lhs << " vs. " << rhs << ")";
+ return new std::string(ss.str());
}
+// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
+// in logging.cc.
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+ extern template std::string* MakeCheckOpString<type, type>( \
+ type const&, type const&, char const*);
+DEFINE_MAKE_CHECK_OP_STRING(int)
+DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(unsigned int)
+DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int)
+DEFINE_MAKE_CHECK_OP_STRING(char const*)
+DEFINE_MAKE_CHECK_OP_STRING(void const*)
+#undef DEFINE_MAKE_CHECK_OP_STRING
+
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+// The (float, float) and (double, double) instantiations are explicitly
+// externialized to ensure proper 32/64-bit comparisons on x86.
+#define DEFINE_CHECK_OP_IMPL(NAME, op) \
+ template <typename Lhs, typename Rhs> \
+ V8_INLINE std::string* Check##NAME##Impl(Lhs const& lhs, Rhs const& rhs, \
+ char const* msg) { \
+ return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
+ } \
+ V8_INLINE std::string* Check##NAME##Impl(int lhs, int rhs, \
+ char const* msg) { \
+ return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
+ } \
+ extern template std::string* Check##NAME##Impl<float, float>( \
+ float const& lhs, float const& rhs, char const* msg); \
+ extern template std::string* Check##NAME##Impl<double, double>( \
+ double const& lhs, double const& rhs, char const* msg);
+DEFINE_CHECK_OP_IMPL(EQ, ==)
+DEFINE_CHECK_OP_IMPL(NE, !=)
+DEFINE_CHECK_OP_IMPL(LE, <=)
+DEFINE_CHECK_OP_IMPL(LT, < )
+DEFINE_CHECK_OP_IMPL(GE, >=)
+DEFINE_CHECK_OP_IMPL(GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+
+#define CHECK_EQ(lhs, rhs) CHECK_OP(EQ, ==, lhs, rhs)
+#define CHECK_NE(lhs, rhs) CHECK_OP(NE, !=, lhs, rhs)
+#define CHECK_LE(lhs, rhs) CHECK_OP(LE, <=, lhs, rhs)
+#define CHECK_LT(lhs, rhs) CHECK_OP(LT, <, lhs, rhs)
+#define CHECK_GE(lhs, rhs) CHECK_OP(GE, >=, lhs, rhs)
+#define CHECK_GT(lhs, rhs) CHECK_OP(GT, >, lhs, rhs)
+#define CHECK_NULL(val) CHECK((val) == nullptr)
+#define CHECK_NOT_NULL(val) CHECK((val) != nullptr)
+#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs))
-#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
- #expected, expected, #value, value)
-
-
-#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
- #unexpected, unexpected, #value, value)
-
-
-#define CHECK_GT(a, b) CHECK((a) > (b))
-#define CHECK_GE(a, b) CHECK((a) >= (b))
-#define CHECK_LT(a, b) CHECK((a) < (b))
-#define CHECK_LE(a, b) CHECK((a) <= (b))
-
-
-namespace v8 {
-namespace base {
// Exposed for making debugging easier (to see where your function is being
// called, just add a call to DumpBacktrace).
void DumpBacktrace();
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
// The DCHECK macro is equivalent to CHECK except that it only
// generates code in debug builds.
#ifdef DEBUG
-#define DCHECK_RESULT(expr) CHECK(expr)
#define DCHECK(condition) CHECK(condition)
#define DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
#define DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
#define DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
#define DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
+#define DCHECK_NULL(val) CHECK_NULL(val)
+#define DCHECK_NOT_NULL(val) CHECK_NOT_NULL(val)
+#define DCHECK_IMPLIES(v1, v2) CHECK_IMPLIES(v1, v2)
#else
-#define DCHECK_RESULT(expr) (expr)
#define DCHECK(condition) ((void) 0)
#define DCHECK_EQ(v1, v2) ((void) 0)
#define DCHECK_NE(v1, v2) ((void) 0)
#define DCHECK_GE(v1, v2) ((void) 0)
#define DCHECK_LT(v1, v2) ((void) 0)
#define DCHECK_LE(v1, v2) ((void) 0)
-#endif
-
-#define DCHECK_NOT_NULL(p) DCHECK_NE(NULL, p)
-
-// "Extra checks" are lightweight checks that are enabled in some release
-// builds.
-#ifdef ENABLE_EXTRA_CHECKS
-#define EXTRA_CHECK(condition) CHECK(condition)
-#else
-#define EXTRA_CHECK(condition) ((void) 0)
+#define DCHECK_NULL(val) ((void) 0)
+#define DCHECK_NOT_NULL(val) ((void) 0)
+#define DCHECK_IMPLIES(v1, v2) ((void) 0)
#endif
#endif // V8_BASE_LOGGING_H_
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 8742519a61..ce75f8bed7 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -346,8 +346,12 @@ inline void USE(T) { }
# define V8_UINT64_C(x) (x ## ULL)
# define V8_INT64_C(x) (x ## LL)
# define V8_INTPTR_C(x) (x)
+#if V8_OS_AIX
+#define V8_PTR_PREFIX "l"
+#else
# define V8_PTR_PREFIX ""
#endif
+#endif
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 4547b66f7a..b91025a3db 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -182,7 +182,7 @@ void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
// Remove the event from the wait list.
for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
- DCHECK_NE(NULL, *wep);
+ DCHECK(*wep);
if (*wep == event) {
*wep = event->next_;
break;
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
new file mode 100644
index 0000000000..3083f752da
--- /dev/null
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -0,0 +1,292 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform specific code for AIX goes here. For the POSIX comaptible parts
+// the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/ucontext.h>
+
+#include <errno.h>
+#include <fcntl.h> // open
+#include <limits.h>
+#include <stdarg.h>
+#include <strings.h> // index
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <sys/types.h> // mmap & munmap
+#include <unistd.h> // getpagesize
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+static inline void* mmapHelper(size_t len, int prot, int flags, int fildes,
+ off_t off) {
+ void* addr = OS::GetRandomMmapAddr();
+ return mmap(addr, len, prot, flags, fildes, off);
+}
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time / msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return tzname[0]; // The location of the timezone string on AIX.
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ // On AIX, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ DCHECK(utc != -1);
+ struct tm* loc = localtime(&utc);
+ DCHECK(loc != NULL);
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond);
+}
+
+
+void* OS::Allocate(const size_t requested, size_t* allocated, bool executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ void* mbase = mmapHelper(msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) {}
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmapHelper(size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmapHelper(size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+static unsigned StringToLong(char* buffer) {
+ return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ static const int MAP_LENGTH = 1024;
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return result;
+ while (true) {
+ char addr_buffer[11];
+ addr_buffer[0] = '0';
+ addr_buffer[1] = 'x';
+ addr_buffer[10] = 0;
+ ssize_t rc = read(fd, addr_buffer + 2, 8);
+ if (rc < 8) break;
+ unsigned start = StringToLong(addr_buffer);
+ rc = read(fd, addr_buffer + 2, 1);
+ if (rc < 1) break;
+ if (addr_buffer[2] != '-') break;
+ rc = read(fd, addr_buffer + 2, 8);
+ if (rc < 8) break;
+ unsigned end = StringToLong(addr_buffer);
+ char buffer[MAP_LENGTH];
+ int bytes_read = -1;
+ do {
+ bytes_read++;
+ if (bytes_read >= MAP_LENGTH - 1) break;
+ rc = read(fd, buffer + bytes_read, 1);
+ if (rc < 1) break;
+ } while (buffer[bytes_read] != '\n');
+ buffer[bytes_read] = 0;
+ // Ignore mappings that are not executable.
+ if (buffer[3] != 'x') continue;
+ char* start_of_path = index(buffer, '/');
+ // There may be no filename in this line. Skip to next.
+ if (start_of_path == NULL) continue;
+ buffer[bytes_read] = 0;
+ result.push_back(SharedLibraryAddress(start_of_path, start, end));
+ }
+ close(fd);
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) {}
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ DCHECK((alignment % OS::AllocateAlignment()) == 0);
+ size_t request_size =
+ RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation =
+ mmapHelper(request_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ DCHECK_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ DCHECK_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ DCHECK(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ DCHECK(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() { return address_ != NULL; }
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmapHelper(size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+ kMmapFd, kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ int prot = PROT_READ | PROT_WRITE;
+#else
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
+ if (mprotect(base, size, prot) == -1) return false;
+
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mprotect(base, size, PROT_NONE) != -1;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() { return true; }
+}
+} // namespace v8::base
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 2cb3bbefd2..dc35d3d812 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -55,7 +55,7 @@
#include <sys/prctl.h> // NOLINT, for prctl
#endif
-#if !V8_OS_NACL
+#if !defined(V8_OS_NACL) && !defined(_AIX)
#include <sys/syscall.h>
#endif
@@ -85,8 +85,8 @@ int OS::ActivationFrameAlignment() {
// Otherwise we just assume 16 byte alignment, i.e.:
// - With gcc 4.4 the tree vectorization optimizer can generate code
// that requires 16 byte alignment such as movdqa on x86.
- // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
- // see "Mac OS X ABI Function Call Guide"
+ // - Mac OS X, PPC and Solaris (64-bit) activation frames must
+ // be 16 byte-aligned; see "Mac OS X ABI Function Call Guide"
return 16;
#endif
}
@@ -171,6 +171,20 @@ void* OS::GetRandomMmapAddr() {
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#elif V8_TARGET_ARCH_PPC64
+#if V8_OS_AIX
+ // AIX: 64 bits of virtual addressing, but we limit address range to:
+ // a) minimize Segment Lookaside Buffer (SLB) misses and
+ raw_addr &= V8_UINT64_C(0x3ffff000);
+ // Use extra address space to isolate the mmap regions.
+ raw_addr += V8_UINT64_C(0x400000000000);
+#elif V8_TARGET_BIG_ENDIAN
+ // Big-endian Linux: 44 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x03fffffff000);
+#else
+ // Little-endian Linux: 48 bits of virtual addressing.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#endif
#else
raw_addr &= 0x3ffff000;
@@ -185,6 +199,10 @@ void* OS::GetRandomMmapAddr() {
// no hint at all. The high hint prevents the break from getting hemmed in
// at low values, ceding half of the address space to the system heap.
raw_addr += 0x80000000;
+#elif V8_OS_AIX
+ // The range 0x30000000 - 0xD0000000 is available on AIX;
+ // choose the upper range.
+ raw_addr += 0x90000000;
# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
@@ -225,6 +243,8 @@ void OS::DebugBreak() {
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
+#elif V8_HOST_ARCH_PPC
+ asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
#if V8_OS_NACL
asm("hlt");
@@ -239,15 +259,6 @@ void OS::DebugBreak() {
}
-// ----------------------------------------------------------------------------
-// Math functions
-
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
int OS::GetCurrentProcessId() {
return static_cast<int>(getpid());
}
@@ -260,6 +271,8 @@ int OS::GetCurrentThreadId() {
return static_cast<int>(syscall(__NR_gettid));
#elif V8_OS_ANDROID
return static_cast<int>(gettid());
+#elif V8_OS_AIX
+ return static_cast<int>(thread_self());
#elif V8_OS_SOLARIS
return static_cast<int>(pthread_self());
#else
@@ -311,10 +324,10 @@ void OS::ClearTimezoneCache(TimezoneCache* cache) {
double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
- if (std::isnan(time)) return nan_value();
+ if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
- if (NULL == t) return nan_value();
+ if (NULL == t) return std::numeric_limits<double>::quiet_NaN();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
@@ -525,8 +538,15 @@ void Thread::Start() {
DCHECK_EQ(0, result);
// Native client uses default stack size.
#if !V8_OS_NACL
- if (stack_size_ > 0) {
- result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ size_t stack_size = stack_size_;
+#if V8_OS_AIX
+ if (stack_size == 0) {
+ // Default on AIX is 96KB -- bump up to 2MB
+ stack_size = 2 * 1024 * 1024;
+ }
+#endif
+ if (stack_size > 0) {
+ result = pthread_attr_setstacksize(&attr, stack_size);
DCHECK_EQ(0, result);
}
#endif
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index d68e8617d7..5a6f2f1cbf 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -1162,11 +1162,6 @@ void OS::SignalCodeMovingGC() { }
#endif // __MINGW32__
-double OS::nan_value() {
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-
int OS::ActivationFrameAlignment() {
#ifdef _WIN64
return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 0bf102723a..07155f7a58 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -246,9 +246,6 @@ class OS {
// using --never-compact) if accurate profiling is desired.
static void SignalCodeMovingGC();
- // Returns the double constant NAN
- static double nan_value();
-
// Support runtime detection of whether the hard float option of the
// EABI is used.
static bool ArmUsingHardFloat();
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 40dd188db3..6734218e50 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -13,7 +13,8 @@
#include <mach/mach_time.h>
#endif
-#include <string.h>
+#include <cstring>
+#include <ostream>
#if V8_OS_WIN
#include "src/base/lazy-instance.h"
@@ -355,6 +356,11 @@ double Time::ToJsTime() const {
}
+std::ostream& operator<<(std::ostream& os, const Time& time) {
+ return os << time.ToJsTime();
+}
+
+
#if V8_OS_WIN
class TickClock {
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 9dfa47d4e5..887664e7ba 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -5,7 +5,8 @@
#ifndef V8_BASE_PLATFORM_TIME_H_
#define V8_BASE_PLATFORM_TIME_H_
-#include <time.h>
+#include <ctime>
+#include <iosfwd>
#include <limits>
#include "src/base/macros.h"
@@ -280,6 +281,8 @@ class Time FINAL {
int64_t us_;
};
+std::ostream& operator<<(std::ostream&, const Time&);
+
inline Time operator+(const TimeDelta& delta, const Time& time) {
return time + delta;
}
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index c665771b8b..617a88ea81 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -88,6 +88,9 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
#elif V8_OS_NACL
// No support for _SC_PHYS_PAGES, assume 2GB.
return static_cast<int64_t>(1) << 31;
+#elif V8_OS_AIX
+ int64_t result = sysconf(_SC_AIX_REALMEM);
+ return static_cast<int64_t>(result) * 1024L;
#elif V8_OS_POSIX
long pages = sysconf(_SC_PHYS_PAGES); // NOLINT(runtime/int)
long page_size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index 9fc747d06f..eeda0e91cb 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -5,12 +5,8 @@
#ifndef V8_DATAFLOW_H_
#define V8_DATAFLOW_H_
-#include "src/v8.h"
-
#include "src/allocation.h"
-#include "src/ast.h"
-#include "src/compiler.h"
-#include "src/zone-inl.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -104,6 +100,8 @@ class BitVector : public ZoneObject {
data_[i / kDataBits] |= (kOne << (i % kDataBits));
}
+ void AddAll() { memset(data_, -1, sizeof(uintptr_t) * data_length_); }
+
void Remove(int i) {
DCHECK(i >= 0 && i < length());
data_[i / kDataBits] &= ~(kOne << (i % kDataBits));
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 7105eb25e2..31d6e3e00c 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -5,6 +5,7 @@
#include "src/bootstrapper.h"
#include "src/accessors.h"
+#include "src/api-natives.h"
#include "src/code-stubs.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
@@ -19,28 +20,10 @@
namespace v8 {
namespace internal {
-NativesExternalStringResource::NativesExternalStringResource(
- Bootstrapper* bootstrapper,
- const char* source,
- size_t length)
- : data_(source), length_(length) {
- if (bootstrapper->delete_these_non_arrays_on_tear_down_ == NULL) {
- bootstrapper->delete_these_non_arrays_on_tear_down_ = new List<char*>(2);
- }
- // The resources are small objects and we only make a fixed number of
- // them, but let's clean them up on exit for neatness.
- bootstrapper->delete_these_non_arrays_on_tear_down_->
- Add(reinterpret_cast<char*>(this));
-}
-
-
Bootstrapper::Bootstrapper(Isolate* isolate)
: isolate_(isolate),
nesting_(0),
- extensions_cache_(Script::TYPE_EXTENSION),
- delete_these_non_arrays_on_tear_down_(NULL),
- delete_these_arrays_on_tear_down_(NULL) {
-}
+ extensions_cache_(Script::TYPE_EXTENSION) {}
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
@@ -50,9 +33,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
// We can use external strings for the natives.
Vector<const char> source = Natives::GetScriptSource(index);
NativesExternalStringResource* resource =
- new NativesExternalStringResource(this,
- source.start(),
- source.length());
+ new NativesExternalStringResource(source.start(), source.length());
// We do not expect this to throw an exception. Change this if it does.
Handle<String> source_code = isolate_->factory()
->NewExternalStringFromOneByte(resource)
@@ -113,39 +94,19 @@ void Bootstrapper::TearDownExtensions() {
}
-char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
- char* memory = new char[bytes];
- if (memory != NULL) {
- if (delete_these_arrays_on_tear_down_ == NULL) {
- delete_these_arrays_on_tear_down_ = new List<char*>(2);
- }
- delete_these_arrays_on_tear_down_->Add(memory);
- }
- return memory;
-}
-
-
void Bootstrapper::TearDown() {
- if (delete_these_non_arrays_on_tear_down_ != NULL) {
- int len = delete_these_non_arrays_on_tear_down_->length();
- DCHECK(len < 1000); // Don't use this mechanism for unbounded allocations.
- for (int i = 0; i < len; i++) {
- delete delete_these_non_arrays_on_tear_down_->at(i);
- delete_these_non_arrays_on_tear_down_->at(i) = NULL;
- }
- delete delete_these_non_arrays_on_tear_down_;
- delete_these_non_arrays_on_tear_down_ = NULL;
- }
-
- if (delete_these_arrays_on_tear_down_ != NULL) {
- int len = delete_these_arrays_on_tear_down_->length();
- DCHECK(len < 1000); // Don't use this mechanism for unbounded allocations.
- for (int i = 0; i < len; i++) {
- delete[] delete_these_arrays_on_tear_down_->at(i);
- delete_these_arrays_on_tear_down_->at(i) = NULL;
+ Object* natives_source_cache = isolate_->heap()->natives_source_cache();
+ if (natives_source_cache->IsFixedArray()) {
+ FixedArray* natives_source_array = FixedArray::cast(natives_source_cache);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Object* natives_source = natives_source_array->get(i);
+ if (!natives_source->IsUndefined()) {
+ const NativesExternalStringResource* resource =
+ reinterpret_cast<const NativesExternalStringResource*>(
+ ExternalOneByteString::cast(natives_source)->resource());
+ delete resource;
+ }
}
- delete delete_these_arrays_on_tear_down_;
- delete_these_arrays_on_tear_down_ = NULL;
}
extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
@@ -183,16 +144,15 @@ class Genesis BASE_EMBEDDED {
// Make the "arguments" and "caller" properties throw a TypeError on access.
void PoisonArgumentsAndCaller(Handle<Map> map);
- // Creates the global objects using the global and the template passed in
- // through the API. We call this regardless of whether we are building a
+ // Creates the global objects using the global proxy and the template passed
+ // in through the API. We call this regardless of whether we are building a
// context from scratch or using a deserialized one from the partial snapshot
// but in the latter case we don't use the objects it produces directly, as
// we have to used the deserialized ones that are linked together with the
// rest of the context snapshot.
- Handle<JSGlobalProxy> CreateNewGlobals(
+ Handle<GlobalObject> CreateNewGlobals(
v8::Handle<v8::ObjectTemplate> global_proxy_template,
- MaybeHandle<JSGlobalProxy> maybe_global_proxy,
- Handle<GlobalObject>* global_object_out);
+ Handle<JSGlobalProxy> global_proxy);
// Hooks the given global proxy into the context. If the context was created
// by deserialization then this will unhook the global proxy that was
// deserialized, leaving the GC to pick it up.
@@ -201,7 +161,8 @@ class Genesis BASE_EMBEDDED {
// Similarly, we want to use the global that has been created by the templates
// passed through the API. The global from the snapshot is detached from the
// other objects in the snapshot.
- void HookUpGlobalObject(Handle<GlobalObject> global_object);
+ void HookUpGlobalObject(Handle<GlobalObject> global_object,
+ Handle<FixedArray> outdated_contexts);
// New context initialization. Used for creating a context from scratch.
void InitializeGlobal(Handle<GlobalObject> global_object,
Handle<JSFunction> empty_function);
@@ -210,6 +171,8 @@ class Genesis BASE_EMBEDDED {
// Used for creating a context from scratch.
void InstallNativeFunctions();
void InstallExperimentalNativeFunctions();
+ // Typed arrays are not serializable and have to initialized afterwards.
+ void InitializeBuiltinTypedArrays();
#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
void InstallNativeFunctions_##id(); \
@@ -373,6 +336,9 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
global_proxy->set_native_context(*factory->null_value());
SetObjectPrototype(global_proxy, factory->null_value());
global_proxy->map()->set_constructor(*factory->null_value());
+ if (FLAG_track_detached_contexts) {
+ env->GetIsolate()->AddDetachedContext(env);
+ }
}
@@ -418,29 +384,29 @@ void Genesis::SetFunctionInstanceDescriptor(
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), attribs);
{ // Add length.
- CallbacksDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), attribs);
{ // Add name.
- CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())),
- name, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+ attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> args =
Accessors::FunctionArgumentsInfo(isolate(), attribs);
{ // Add arguments.
- CallbacksDescriptor d(Handle<Name>(Name::cast(args->name())),
- args, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(args->name())), args,
+ attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> caller =
Accessors::FunctionCallerInfo(isolate(), attribs);
{ // Add caller.
- CallbacksDescriptor d(Handle<Name>(Name::cast(caller->name())),
- caller, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(caller->name())),
+ caller, attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
@@ -449,8 +415,8 @@ void Genesis::SetFunctionInstanceDescriptor(
}
Handle<AccessorInfo> prototype =
Accessors::FunctionPrototypeInfo(isolate(), attribs);
- CallbacksDescriptor d(Handle<Name>(Name::cast(prototype->name())),
- prototype, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
+ prototype, attribs);
map->AppendDescriptor(&d);
}
}
@@ -574,7 +540,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
// Add length.
if (function_mode == BOUND_FUNCTION) {
Handle<String> length_string = isolate()->factory()->length_string();
- FieldDescriptor d(length_string, 0, ro_attribs, Representation::Tagged());
+ DataDescriptor d(length_string, 0, ro_attribs, Representation::Tagged());
map->AppendDescriptor(&d);
} else {
DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
@@ -582,24 +548,25 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
function_mode == FUNCTION_WITHOUT_PROTOTYPE);
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), ro_attribs);
- CallbacksDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, ro_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, ro_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), ro_attribs);
{ // Add name.
- CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())),
- name, ro_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+ ro_attribs);
map->AppendDescriptor(&d);
}
{ // Add arguments.
- CallbacksDescriptor d(factory()->arguments_string(), arguments,
- rw_attribs);
+ AccessorConstantDescriptor d(factory()->arguments_string(), arguments,
+ rw_attribs);
map->AppendDescriptor(&d);
}
{ // Add caller.
- CallbacksDescriptor d(factory()->caller_string(), caller, rw_attribs);
+ AccessorConstantDescriptor d(factory()->caller_string(), caller,
+ rw_attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
@@ -609,8 +576,8 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
: ro_attribs;
Handle<AccessorInfo> prototype =
Accessors::FunctionPrototypeInfo(isolate(), attribs);
- CallbacksDescriptor d(Handle<Name>(Name::cast(prototype->name())),
- prototype, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
+ prototype, attribs);
map->AppendDescriptor(&d);
}
}
@@ -709,7 +676,7 @@ static void ReplaceAccessors(Handle<Map> map,
Handle<AccessorPair> accessor_pair) {
DescriptorArray* descriptors = map->instance_descriptors();
int idx = descriptors->SearchWithCache(*name, *map);
- CallbacksDescriptor descriptor(name, accessor_pair, attributes);
+ AccessorConstantDescriptor descriptor(name, accessor_pair, attributes);
descriptors->Replace(idx, &descriptor);
}
@@ -756,14 +723,13 @@ void Genesis::CreateRoots() {
}
-Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
+Handle<GlobalObject> Genesis::CreateNewGlobals(
v8::Handle<v8::ObjectTemplate> global_proxy_template,
- MaybeHandle<JSGlobalProxy> maybe_global_proxy,
- Handle<GlobalObject>* global_object_out) {
+ Handle<JSGlobalProxy> global_proxy) {
// The argument global_proxy_template aka data is an ObjectTemplateInfo.
// It has a constructor pointer that points at global_constructor which is a
// FunctionTemplateInfo.
- // The global_proxy_constructor is used to create or reinitialize the
+ // The global_proxy_constructor is used to (re)initialize the
// global_proxy. The global_proxy_constructor also has a prototype_template
// pointer that points at js_global_object_template which is an
// ObjectTemplateInfo.
@@ -809,21 +775,17 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
} else {
Handle<FunctionTemplateInfo> js_global_object_constructor(
FunctionTemplateInfo::cast(js_global_object_template->constructor()));
- js_global_object_function =
- factory()->CreateApiFunction(js_global_object_constructor,
- factory()->the_hole_value(),
- factory()->GlobalObjectType);
+ js_global_object_function = ApiNatives::CreateApiFunction(
+ isolate(), js_global_object_constructor, factory()->the_hole_value(),
+ ApiNatives::GlobalObjectType);
}
js_global_object_function->initial_map()->set_is_hidden_prototype();
js_global_object_function->initial_map()->set_dictionary_map(true);
Handle<GlobalObject> global_object =
factory()->NewGlobalObject(js_global_object_function);
- if (global_object_out != NULL) {
- *global_object_out = global_object;
- }
- // Step 2: create or re-initialize the global proxy object.
+ // Step 2: (re)initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_proxy_template.IsEmpty()) {
Handle<String> name = Handle<String>(heap()->empty_string());
@@ -836,10 +798,9 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
v8::Utils::OpenHandle(*global_proxy_template);
Handle<FunctionTemplateInfo> global_constructor(
FunctionTemplateInfo::cast(data->constructor()));
- global_proxy_function =
- factory()->CreateApiFunction(global_constructor,
- factory()->the_hole_value(),
- factory()->GlobalProxyType);
+ global_proxy_function = ApiNatives::CreateApiFunction(
+ isolate(), global_constructor, factory()->the_hole_value(),
+ ApiNatives::GlobalProxyType);
}
Handle<String> global_name = factory()->global_string();
@@ -849,15 +810,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
// Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
// Return the global proxy.
- Handle<JSGlobalProxy> global_proxy;
- if (maybe_global_proxy.ToHandle(&global_proxy)) {
- factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
- } else {
- global_proxy = Handle<JSGlobalProxy>::cast(
- factory()->NewJSObject(global_proxy_function, TENURED));
- global_proxy->set_hash(heap()->undefined_value());
- }
- return global_proxy;
+ factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
+ return global_object;
}
@@ -867,17 +821,29 @@ void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
global_object->set_native_context(*native_context());
global_object->set_global_proxy(*global_proxy);
global_proxy->set_native_context(*native_context());
+ // If we deserialized the context, the global proxy is already
+ // correctly set up. Otherwise it's undefined.
+ DCHECK(native_context()->get(Context::GLOBAL_PROXY_INDEX)->IsUndefined() ||
+ native_context()->global_proxy() == *global_proxy);
native_context()->set_global_proxy(*global_proxy);
}
-void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object) {
+void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object,
+ Handle<FixedArray> outdated_contexts) {
Handle<GlobalObject> global_object_from_snapshot(
GlobalObject::cast(native_context()->extension()));
Handle<JSBuiltinsObject> builtins_global(native_context()->builtins());
native_context()->set_extension(*global_object);
- native_context()->set_global_object(*global_object);
native_context()->set_security_token(*global_object);
+
+ // Replace outdated global objects in deserialized contexts.
+ for (int i = 0; i < outdated_contexts->length(); ++i) {
+ Context* context = Context::cast(outdated_contexts->get(i));
+ DCHECK_EQ(context->global_object(), *global_object_from_snapshot);
+ context->set_global_object(*global_object);
+ }
+
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Runtime::DefineObjectProperty(builtins_global, factory()->global_string(),
@@ -948,9 +914,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<AccessorInfo> array_length =
Accessors::ArrayLengthInfo(isolate, attribs);
{ // Add length.
- CallbacksDescriptor d(
- Handle<Name>(Name::cast(array_length->name())),
- array_length, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(array_length->name())), array_length,
+ attribs);
initial_map->AppendDescriptor(&d);
}
@@ -1002,7 +968,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Accessors::StringLengthInfo(isolate, attribs));
{ // Add length.
- CallbacksDescriptor d(factory->length_string(), string_length, attribs);
+ AccessorConstantDescriptor d(factory->length_string(), string_length,
+ attribs);
string_map->AppendDescriptor(&d);
}
}
@@ -1047,41 +1014,38 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
// ECMA-262, section 15.10.7.1.
Handle<AccessorInfo> regexp_source(
Accessors::RegExpSourceInfo(isolate, final));
- CallbacksDescriptor d(factory->source_string(), regexp_source, final);
+ AccessorConstantDescriptor d(factory->source_string(), regexp_source,
+ final);
initial_map->AppendDescriptor(&d);
}
{
// ECMA-262, section 15.10.7.2.
- FieldDescriptor field(factory->global_string(),
- JSRegExp::kGlobalFieldIndex,
- final,
- Representation::Tagged());
+ DataDescriptor field(factory->global_string(),
+ JSRegExp::kGlobalFieldIndex, final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field);
}
{
// ECMA-262, section 15.10.7.3.
- FieldDescriptor field(factory->ignore_case_string(),
- JSRegExp::kIgnoreCaseFieldIndex,
- final,
- Representation::Tagged());
+ DataDescriptor field(factory->ignore_case_string(),
+ JSRegExp::kIgnoreCaseFieldIndex, final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field);
}
{
// ECMA-262, section 15.10.7.4.
- FieldDescriptor field(factory->multiline_string(),
- JSRegExp::kMultilineFieldIndex,
- final,
- Representation::Tagged());
+ DataDescriptor field(factory->multiline_string(),
+ JSRegExp::kMultilineFieldIndex, final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field);
}
{
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(factory->last_index_string(),
- JSRegExp::kLastIndexFieldIndex,
- writable,
- Representation::Tagged());
+ DataDescriptor field(factory->last_index_string(),
+ JSRegExp::kLastIndexFieldIndex, writable,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field);
}
@@ -1178,14 +1142,14 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Map::EnsureDescriptorSlack(iterator_result_map,
JSGeneratorObject::kResultPropertyCount);
- FieldDescriptor value_descr(factory->value_string(),
- JSGeneratorObject::kResultValuePropertyIndex,
- NONE, Representation::Tagged());
+ DataDescriptor value_descr(factory->value_string(),
+ JSGeneratorObject::kResultValuePropertyIndex,
+ NONE, Representation::Tagged());
iterator_result_map->AppendDescriptor(&value_descr);
- FieldDescriptor done_descr(factory->done_string(),
- JSGeneratorObject::kResultDonePropertyIndex,
- NONE, Representation::Tagged());
+ DataDescriptor done_descr(factory->done_string(),
+ JSGeneratorObject::kResultDonePropertyIndex, NONE,
+ Representation::Tagged());
iterator_result_map->AppendDescriptor(&done_descr);
iterator_result_map->set_unused_property_fields(0);
@@ -1219,13 +1183,13 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Map::EnsureDescriptorSlack(map, 2);
{ // length
- FieldDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
- DONT_ENUM, Representation::Tagged());
+ DataDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
- FieldDescriptor d(factory->callee_string(), Heap::kArgumentsCalleeIndex,
- DONT_ENUM, Representation::Tagged());
+ DataDescriptor d(factory->callee_string(), Heap::kArgumentsCalleeIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
// @@iterator method is added later.
@@ -1276,16 +1240,18 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Map::EnsureDescriptorSlack(map, 3);
{ // length
- FieldDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
- DONT_ENUM, Representation::Tagged());
+ DataDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
- CallbacksDescriptor d(factory->callee_string(), callee, attributes);
+ AccessorConstantDescriptor d(factory->callee_string(), callee,
+ attributes);
map->AppendDescriptor(&d);
}
{ // caller
- CallbacksDescriptor d(factory->caller_string(), caller, attributes);
+ AccessorConstantDescriptor d(factory->caller_string(), caller,
+ attributes);
map->AppendDescriptor(&d);
}
// @@iterator method is added later.
@@ -1446,9 +1412,9 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
function_info = Compiler::CompileScript(
- source, script_name, 0, 0, false, top_context, extension, NULL,
+ source, script_name, 0, 0, false, false, top_context, extension, NULL,
ScriptCompiler::kNoCompileOptions,
- use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
+ use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE, false);
if (function_info.is_null()) return false;
if (cache != NULL) cache->Add(name, function_info);
}
@@ -1489,7 +1455,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
.ToHandleChecked());
}
const char* inner = period_pos + 1;
- DCHECK_EQ(NULL, strchr(inner, '.'));
+ DCHECK(!strchr(inner, '.'));
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
Handle<String> property_string = factory->InternalizeUtf8String(property);
@@ -1531,11 +1497,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToLength", to_length_fun);
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
- INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
- INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
- configure_instance_fun);
INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
- INSTALL_NATIVE(JSObject, "functionCache", function_cache);
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
@@ -1579,6 +1541,60 @@ void Genesis::InstallExperimentalNativeFunctions() {
}
+template <typename Data>
+Data* SetBuiltinTypedArray(Isolate* isolate, Handle<JSBuiltinsObject> builtins,
+ ExternalArrayType type, Data* data,
+ size_t num_elements, const char* name) {
+ size_t byte_length = num_elements * sizeof(*data);
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ bool should_be_freed = false;
+ if (data == NULL) {
+ data = reinterpret_cast<Data*>(malloc(byte_length));
+ should_be_freed = true;
+ }
+ Runtime::SetupArrayBuffer(isolate, buffer, true, data, byte_length);
+ buffer->set_should_be_freed(should_be_freed);
+
+ Handle<JSTypedArray> typed_array =
+ isolate->factory()->NewJSTypedArray(type, buffer, 0, num_elements);
+ Handle<String> name_string = isolate->factory()->InternalizeUtf8String(name);
+ // Reset property cell type before (re)initializing.
+ JSBuiltinsObject::InvalidatePropertyCell(builtins, name_string);
+ JSObject::SetOwnPropertyIgnoreAttributes(builtins, name_string, typed_array,
+ DONT_DELETE).Assert();
+ return data;
+}
+
+
+void Genesis::InitializeBuiltinTypedArrays() {
+ Handle<JSBuiltinsObject> builtins(native_context()->builtins());
+ { // Initially seed the per-context random number generator using the
+ // per-isolate random number generator.
+ const size_t num_elements = 2;
+ const size_t num_bytes = num_elements * sizeof(uint32_t);
+ uint32_t* state = SetBuiltinTypedArray<uint32_t>(isolate(), builtins,
+ kExternalUint32Array, NULL,
+ num_elements, "rngstate");
+ do {
+ isolate()->random_number_generator()->NextBytes(state, num_bytes);
+ } while (state[0] == 0 || state[1] == 0);
+ }
+
+ { // Initialize trigonometric lookup tables and constants.
+ const size_t num_elements = arraysize(fdlibm::MathConstants::constants);
+ double* data = const_cast<double*>(fdlibm::MathConstants::constants);
+ SetBuiltinTypedArray<double>(isolate(), builtins, kExternalFloat64Array,
+ data, num_elements, "kMath");
+ }
+
+ { // Initialize a result array for rempio2 calculation
+ const size_t num_elements = 2;
+ SetBuiltinTypedArray<double>(isolate(), builtins, kExternalFloat64Array,
+ NULL, num_elements, "rempio2result");
+ }
+}
+
+
#define EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(id) \
void Genesis::InstallNativeFunctions_##id() {}
@@ -1596,6 +1612,9 @@ EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_templates)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode_regexps)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_computed_property_names)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_rest_parameters)
void Genesis::InstallNativeFunctions_harmony_proxies() {
@@ -1626,6 +1645,8 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_templates)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_computed_property_names)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rest_parameters)
void Genesis::InitializeGlobal_harmony_regexps() {
Handle<JSObject> builtins(native_context()->builtins());
@@ -1639,6 +1660,19 @@ void Genesis::InitializeGlobal_harmony_regexps() {
}
+void Genesis::InitializeGlobal_harmony_unicode_regexps() {
+ Handle<JSObject> builtins(native_context()->builtins());
+
+ Handle<HeapObject> flag(FLAG_harmony_unicode_regexps ? heap()->true_value()
+ : heap()->false_value());
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ Runtime::DefineObjectProperty(builtins,
+ factory()->harmony_unicode_regexps_string(),
+ flag, attributes).Assert();
+}
+
+
Handle<JSFunction> Genesis::InstallInternalArray(
Handle<JSBuiltinsObject> builtins,
const char* name,
@@ -1674,8 +1708,8 @@ Handle<JSFunction> Genesis::InstallInternalArray(
Handle<AccessorInfo> array_length =
Accessors::ArrayLengthInfo(isolate(), attribs);
{ // Add length.
- CallbacksDescriptor d(
- Handle<Name>(Name::cast(array_length->name())), array_length, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(array_length->name())),
+ array_length, attribs);
initial_map->AppendDescriptor(&d);
}
@@ -1749,7 +1783,7 @@ bool Genesis::InstallNatives() {
native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
- Map::EnsureDescriptorSlack(script_map, 14);
+ Map::EnsureDescriptorSlack(script_map, 15);
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1757,16 +1791,17 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> script_column =
Accessors::ScriptColumnOffsetInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_column->name())),
- script_column, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_column->name())), script_column,
+ attribs);
script_map->AppendDescriptor(&d);
}
Handle<AccessorInfo> script_id =
Accessors::ScriptIdInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_id->name())),
- script_id, attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(script_id->name())),
+ script_id, attribs);
script_map->AppendDescriptor(&d);
}
@@ -1774,39 +1809,40 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> script_name =
Accessors::ScriptNameInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_name->name())),
- script_name, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
script_map->AppendDescriptor(&d);
}
Handle<AccessorInfo> script_line =
Accessors::ScriptLineOffsetInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_line->name())),
- script_line, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
script_map->AppendDescriptor(&d);
}
Handle<AccessorInfo> script_source =
Accessors::ScriptSourceInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_source->name())),
- script_source, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source->name())), script_source,
+ attribs);
script_map->AppendDescriptor(&d);
}
Handle<AccessorInfo> script_type =
Accessors::ScriptTypeInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_type->name())),
- script_type, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
script_map->AppendDescriptor(&d);
}
Handle<AccessorInfo> script_compilation_type =
Accessors::ScriptCompilationTypeInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
+ AccessorConstantDescriptor d(
Handle<Name>(Name::cast(script_compilation_type->name())),
script_compilation_type, attribs);
script_map->AppendDescriptor(&d);
@@ -1815,15 +1851,16 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> script_line_ends =
Accessors::ScriptLineEndsInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_line_ends->name())),
- script_line_ends, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_line_ends->name())), script_line_ends,
+ attribs);
script_map->AppendDescriptor(&d);
}
Handle<AccessorInfo> script_context_data =
Accessors::ScriptContextDataInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
+ AccessorConstantDescriptor d(
Handle<Name>(Name::cast(script_context_data->name())),
script_context_data, attribs);
script_map->AppendDescriptor(&d);
@@ -1832,7 +1869,7 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> script_eval_from_script =
Accessors::ScriptEvalFromScriptInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
+ AccessorConstantDescriptor d(
Handle<Name>(Name::cast(script_eval_from_script->name())),
script_eval_from_script, attribs);
script_map->AppendDescriptor(&d);
@@ -1841,7 +1878,7 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> script_eval_from_script_position =
Accessors::ScriptEvalFromScriptPositionInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
+ AccessorConstantDescriptor d(
Handle<Name>(Name::cast(script_eval_from_script_position->name())),
script_eval_from_script_position, attribs);
script_map->AppendDescriptor(&d);
@@ -1850,7 +1887,7 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> script_eval_from_function_name =
Accessors::ScriptEvalFromFunctionNameInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
+ AccessorConstantDescriptor d(
Handle<Name>(Name::cast(script_eval_from_function_name->name())),
script_eval_from_function_name, attribs);
script_map->AppendDescriptor(&d);
@@ -1859,20 +1896,30 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> script_source_url =
Accessors::ScriptSourceUrlInfo(isolate(), attribs);
{
- CallbacksDescriptor d(Handle<Name>(Name::cast(script_source_url->name())),
- script_source_url, attribs);
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source_url->name())),
+ script_source_url, attribs);
script_map->AppendDescriptor(&d);
}
Handle<AccessorInfo> script_source_mapping_url =
Accessors::ScriptSourceMappingUrlInfo(isolate(), attribs);
{
- CallbacksDescriptor d(
+ AccessorConstantDescriptor d(
Handle<Name>(Name::cast(script_source_mapping_url->name())),
script_source_mapping_url, attribs);
script_map->AppendDescriptor(&d);
}
+ Handle<AccessorInfo> script_is_embedder_debug_script =
+ Accessors::ScriptIsEmbedderDebugScriptInfo(isolate(), attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_is_embedder_debug_script->name())),
+ script_is_embedder_debug_script, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
// Allocate the empty script.
Handle<Script> script = factory()->NewScript(factory()->empty_string());
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@@ -2012,6 +2059,8 @@ bool Genesis::InstallNatives() {
InstallNativeFunctions();
+ native_context()->set_function_cache(heap()->empty_fixed_array());
+
// Store the map for the string prototype after the natives has been compiled
// and the String function has been set up.
Handle<JSFunction> string_function(native_context()->string_function());
@@ -2053,7 +2102,7 @@ bool Genesis::InstallNatives() {
DCHECK(call->is_compiled());
// Set the expected parameters for apply to 2; required by builtin.
- apply->shared()->set_formal_parameter_count(2);
+ apply->shared()->set_internal_formal_parameter_count(2);
// Set the lengths for the functions to satisfy ECMA-262.
call->shared()->set_length(1);
@@ -2092,25 +2141,22 @@ bool Genesis::InstallNatives() {
int old = array_descriptors->SearchWithCache(
*length, array_function->initial_map());
DCHECK(old != DescriptorArray::kNotFound);
- CallbacksDescriptor desc(length,
- handle(array_descriptors->GetValue(old),
- isolate()),
- array_descriptors->GetDetails(old).attributes());
+ AccessorConstantDescriptor desc(
+ length, handle(array_descriptors->GetValue(old), isolate()),
+ array_descriptors->GetDetails(old).attributes());
initial_map->AppendDescriptor(&desc);
}
{
- FieldDescriptor index_field(factory()->index_string(),
- JSRegExpResult::kIndexIndex,
- NONE,
- Representation::Tagged());
+ DataDescriptor index_field(factory()->index_string(),
+ JSRegExpResult::kIndexIndex, NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&index_field);
}
{
- FieldDescriptor input_field(factory()->input_string(),
- JSRegExpResult::kInputIndex,
- NONE,
- Representation::Tagged());
+ DataDescriptor input_field(factory()->input_string(),
+ JSRegExpResult::kInputIndex, NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&input_field);
}
@@ -2127,22 +2173,22 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> arguments_iterator =
Accessors::ArgumentsIteratorInfo(isolate(), attribs);
{
- CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
- attribs);
+ AccessorConstantDescriptor d(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
Handle<Map> map(native_context()->sloppy_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
{
- CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
- attribs);
+ AccessorConstantDescriptor d(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
Handle<Map> map(native_context()->aliased_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
{
- CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
- attribs);
+ AccessorConstantDescriptor d(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
Handle<Map> map(native_context()->strict_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
@@ -2150,7 +2196,9 @@ bool Genesis::InstallNatives() {
}
#ifdef VERIFY_HEAP
- builtins->ObjectVerify();
+ if (FLAG_verify_heap) {
+ builtins->ObjectVerify();
+ }
#endif
return true;
@@ -2165,8 +2213,7 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_proxies_natives[] = {"native proxy.js", NULL};
static const char* harmony_strings_natives[] = {"native harmony-string.js",
NULL};
- static const char* harmony_classes_natives[] = {"native harmony-classes.js",
- NULL};
+ static const char* harmony_classes_natives[] = {NULL};
static const char* harmony_modules_natives[] = {NULL};
static const char* harmony_scoping_natives[] = {NULL};
static const char* harmony_object_literals_natives[] = {NULL};
@@ -2180,6 +2227,9 @@ bool Genesis::InstallExperimentalNatives() {
"native harmony-templates.js", NULL};
static const char* harmony_sloppy_natives[] = {NULL};
static const char* harmony_unicode_natives[] = {NULL};
+ static const char* harmony_unicode_regexps_natives[] = {NULL};
+ static const char* harmony_computed_property_names_natives[] = {NULL};
+ static const char* harmony_rest_parameters_natives[] = {NULL};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2491,11 +2541,11 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<Object> function_object = Object::GetProperty(
isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- builtins->set_javascript_builtin(id, *function);
// TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
// the correct solution is to restore the context register after invoking
// builtins from full-codegen.
- function->shared()->set_optimization_disabled(true);
+ function->shared()->DisableOptimization(kBuiltinFunctionCannotBeOptimized);
+ builtins->set_javascript_builtin(id, *function);
if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
@@ -2544,7 +2594,7 @@ bool Genesis::ConfigureApiObject(Handle<JSObject> object,
->IsTemplateFor(object->map()));;
MaybeHandle<JSObject> maybe_obj =
- Execution::InstantiateObject(object_template);
+ ApiNatives::InstantiateObject(object_template);
Handle<JSObject> obj;
if (!maybe_obj.ToHandle(&obj)) {
DCHECK(isolate()->has_pending_exception());
@@ -2558,13 +2608,18 @@ bool Genesis::ConfigureApiObject(Handle<JSObject> object,
void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<JSObject> to) {
+ // If JSObject::AddProperty asserts due to already existing property,
+ // it is likely due to both global objects sharing property name(s).
+ // Merging those two global objects is impossible.
+ // The global template must not create properties that already exist
+ // in the snapshotted global object.
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map()->instance_descriptors());
for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
- case FIELD: {
+ case DATA: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
@@ -2574,16 +2629,16 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
JSObject::AddProperty(to, key, value, details.attributes());
break;
}
- case CONSTANT: {
+ case DATA_CONSTANT: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> constant(descs->GetConstant(i), isolate());
JSObject::AddProperty(to, key, constant, details.attributes());
break;
}
- case ACCESSOR_FIELD:
+ case ACCESSOR:
UNREACHABLE();
- case CALLBACKS: {
+ case ACCESSOR_CONSTANT: {
Handle<Name> key(descs->GetKey(i));
LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
@@ -2593,7 +2648,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!to->HasFastProperties());
// Add to dictionary.
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
- PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
@@ -2621,7 +2676,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
isolate());
}
PropertyDetails details = properties->DetailsAt(i);
- DCHECK_EQ(DATA, details.kind());
+ DCHECK_EQ(kData, details.kind());
JSObject::AddProperty(to, key, value, details.attributes());
}
}
@@ -2711,11 +2766,21 @@ Genesis::Genesis(Isolate* isolate,
StackLimitCheck check(isolate);
if (check.HasOverflowed()) return;
+ // The deserializer needs to hook up references to the global proxy.
+ // Create an uninitialized global proxy now if we don't have one
+ // and initialize it later in CreateNewGlobals.
+ Handle<JSGlobalProxy> global_proxy;
+ if (!maybe_global_proxy.ToHandle(&global_proxy)) {
+ global_proxy = isolate->factory()->NewUninitializedJSGlobalProxy();
+ }
+
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
- if (isolate->initialized_from_snapshot()) {
- native_context_ = Snapshot::NewContextFromSnapshot(isolate);
- } else {
+ Handle<FixedArray> outdated_contexts;
+ if (!isolate->initialized_from_snapshot() ||
+ !Snapshot::NewContextFromSnapshot(isolate, global_proxy,
+ &outdated_contexts)
+ .ToHandle(&native_context_)) {
native_context_ = Handle<Context>();
}
@@ -2732,12 +2797,11 @@ Genesis::Genesis(Isolate* isolate,
Map::TraceAllTransitions(object_fun->initial_map());
}
#endif
- Handle<GlobalObject> global_object;
- Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
- global_proxy_template, maybe_global_proxy, &global_object);
+ Handle<GlobalObject> global_object =
+ CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
- HookUpGlobalObject(global_object);
+ HookUpGlobalObject(global_object, outdated_contexts);
native_context()->builtins()->set_global_proxy(
native_context()->global_proxy());
@@ -2747,9 +2811,8 @@ Genesis::Genesis(Isolate* isolate,
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
- Handle<GlobalObject> global_object;
- Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
- global_proxy_template, maybe_global_proxy, &global_object);
+ Handle<GlobalObject> global_object =
+ CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
InitializeGlobal(global_object, empty_function);
InstallJSFunctionResultCaches();
@@ -2766,43 +2829,9 @@ Genesis::Genesis(Isolate* isolate,
if (!InstallExperimentalNatives()) return;
InitializeExperimentalGlobal();
- // We can't (de-)serialize typed arrays currently, but we are lucky: The state
- // of the random number generator needs no initialization during snapshot
- // creation time and we don't need trigonometric functions then.
- if (!isolate->serializer_enabled()) {
- // Initially seed the per-context random number generator using the
- // per-isolate random number generator.
- const int num_elems = 2;
- const int num_bytes = num_elems * sizeof(uint32_t);
- uint32_t* state = reinterpret_cast<uint32_t*>(malloc(num_bytes));
-
- do {
- isolate->random_number_generator()->NextBytes(state, num_bytes);
- } while (state[0] == 0 || state[1] == 0);
-
- v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(
- reinterpret_cast<v8::Isolate*>(isolate), state, num_bytes);
- Utils::OpenHandle(*buffer)->set_should_be_freed(true);
- v8::Local<v8::Uint32Array> ta = v8::Uint32Array::New(buffer, 0, num_elems);
- Handle<JSBuiltinsObject> builtins(native_context()->builtins());
- Runtime::DefineObjectProperty(builtins, factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("rngstate")),
- Utils::OpenHandle(*ta), NONE).Assert();
-
- // Initialize trigonometric lookup tables and constants.
- const int constants_size = arraysize(fdlibm::MathConstants::constants);
- const int table_num_bytes = constants_size * kDoubleSize;
- v8::Local<v8::ArrayBuffer> trig_buffer = v8::ArrayBuffer::New(
- reinterpret_cast<v8::Isolate*>(isolate),
- const_cast<double*>(fdlibm::MathConstants::constants), table_num_bytes);
- v8::Local<v8::Float64Array> trig_table =
- v8::Float64Array::New(trig_buffer, 0, constants_size);
-
- Runtime::DefineObjectProperty(
- builtins,
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("kMath")),
- Utils::OpenHandle(*trig_table), NONE).Assert();
- }
+ // The serializer cannot serialize typed arrays. Reset those typed arrays
+ // for each new context.
+ InitializeBuiltinTypedArrays();
result_ = native_context();
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 9d4f270842..4bf74b350b 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -98,10 +98,6 @@ class Bootstrapper FINAL {
char* RestoreState(char* from);
void FreeThreadResources();
- // This will allocate a char array that is deleted when V8 is shut down.
- // It should only be used for strictly finite allocations.
- char* AllocateAutoDeletedArray(int bytes);
-
// Used for new context creation.
bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
@@ -113,10 +109,6 @@ class Bootstrapper FINAL {
typedef int NestingCounterType;
NestingCounterType nesting_;
SourceCodeCache extensions_cache_;
- // This is for delete, not delete[].
- List<char*>* delete_these_non_arrays_on_tear_down_;
- // This is for delete[]
- List<char*>* delete_these_arrays_on_tear_down_;
friend class BootstrapperActive;
friend class Isolate;
@@ -155,9 +147,8 @@ class BootstrapperActive FINAL BASE_EMBEDDED {
class NativesExternalStringResource FINAL
: public v8::String::ExternalOneByteStringResource {
public:
- NativesExternalStringResource(Bootstrapper* bootstrapper,
- const char* source,
- size_t length);
+ NativesExternalStringResource(const char* source, size_t length)
+ : data_(source), length_(length) {}
const char* data() const OVERRIDE { return data_; }
size_t length() const OVERRIDE { return length_; }
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index b8d0b42d50..21c246ca42 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/api.h"
+#include "src/api-natives.h"
#include "src/arguments.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
@@ -665,7 +666,7 @@ BUILTIN(ArraySlice) {
bool packed = true;
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = k; i < final; i++) {
- if (!accessor->HasElement(object, object, i, elms)) {
+ if (!accessor->HasElement(object, i, elms)) {
packed = false;
break;
}
@@ -1022,108 +1023,45 @@ BUILTIN(GeneratorPoisonPill) {
//
-// Searches the hidden prototype chain of the given object for the first
-// object that is an instance of the given type. If no such object can
-// be found then Heap::null_value() is returned.
-static inline Object* FindHidden(Heap* heap,
- Object* object,
- FunctionTemplateInfo* type) {
- for (PrototypeIterator iter(heap->isolate(), object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- if (type->IsTemplateFor(iter.GetCurrent())) {
- return iter.GetCurrent();
- }
- }
- return heap->null_value();
-}
-
-
-// Returns the holder JSObject if the function can legally be called
-// with this receiver. Returns Heap::null_value() if the call is
-// illegal. Any arguments that don't fit the expected type is
-// overwritten with undefined. Note that holder and the arguments are
-// implicitly rewritten with the first object in the hidden prototype
-// chain that actually has the expected type.
-static inline Object* TypeCheck(Heap* heap,
- int argc,
- Object** argv,
- FunctionTemplateInfo* info) {
- Object* recv = argv[0];
- // API calls are only supported with JSObject receivers.
- if (!recv->IsJSObject()) return heap->null_value();
- Object* sig_obj = info->signature();
- if (sig_obj->IsUndefined()) return recv;
- SignatureInfo* sig = SignatureInfo::cast(sig_obj);
- // If necessary, check the receiver
- Object* recv_type = sig->receiver();
- Object* holder = recv;
- if (!recv_type->IsUndefined()) {
- holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
- if (holder == heap->null_value()) return heap->null_value();
- }
- Object* args_obj = sig->args();
- // If there is no argument signature we're done
- if (args_obj->IsUndefined()) return holder;
- FixedArray* args = FixedArray::cast(args_obj);
- int length = args->length();
- if (argc <= length) length = argc - 1;
- for (int i = 0; i < length; i++) {
- Object* argtype = args->get(i);
- if (argtype->IsUndefined()) continue;
- Object** arg = &argv[-1 - i];
- Object* current = *arg;
- current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
- if (current == heap->null_value()) current = heap->undefined_value();
- *arg = current;
- }
- return holder;
-}
-
-
template <bool is_construct>
-MUST_USE_RESULT static Object* HandleApiCallHelper(
- BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
- DCHECK(is_construct == CalledAsConstructor(isolate));
- Heap* heap = isolate->heap();
-
+MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
+ Isolate* isolate, BuiltinArguments<NEEDS_CALLED_FUNCTION>& args) {
HandleScope scope(isolate);
Handle<JSFunction> function = args.called_function();
- DCHECK(function->shared()->IsApiFunction());
+ // TODO(ishell): turn this back to a DCHECK.
+ CHECK(function->shared()->IsApiFunction());
Handle<FunctionTemplateInfo> fun_data(
function->shared()->get_api_func_data(), isolate);
if (is_construct) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ ASSIGN_RETURN_ON_EXCEPTION(
isolate, fun_data,
- isolate->factory()->ConfigureInstance(
- fun_data, Handle<JSObject>::cast(args.receiver())));
+ ApiNatives::ConfigureInstance(isolate, fun_data,
+ Handle<JSObject>::cast(args.receiver())),
+ Object);
}
- SharedFunctionInfo* shared = function->shared();
- if (shared->strict_mode() == SLOPPY && !shared->native()) {
- Object* recv = args[0];
- DCHECK(!recv->IsNull());
- if (recv->IsUndefined()) args[0] = function->global_proxy();
- }
+ DCHECK(!args[0]->IsNull());
+ if (args[0]->IsUndefined()) args[0] = function->global_proxy();
- Object* raw_holder = TypeCheck(heap, args.length(), &args[0], *fun_data);
+ Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, args[0]);
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError("illegal_invocation", HandleVector(&function, 1)));
+ THROW_NEW_ERROR(
+ isolate, NewTypeError("illegal_invocation", HandleVector(&function, 1)),
+ Object);
}
Object* raw_call_data = fun_data->call_code();
if (!raw_call_data->IsUndefined()) {
+ // TODO(ishell): remove this debugging code.
+ CHECK(raw_call_data->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
v8::FunctionCallback callback =
v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
- Object* result;
LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
DCHECK(raw_holder->IsJSObject());
@@ -1137,28 +1075,93 @@ MUST_USE_RESULT static Object* HandleApiCallHelper(
is_construct);
v8::Handle<v8::Value> value = custom.Call(callback);
+ Handle<Object> result;
if (value.IsEmpty()) {
- result = heap->undefined_value();
+ result = isolate->factory()->undefined_value();
} else {
- result = *reinterpret_cast<Object**>(*value);
+ result = v8::Utils::OpenHandle(*value);
result->VerifyApiCallResultType();
}
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (!is_construct || result->IsJSObject()) return result;
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!is_construct || result->IsJSObject()) {
+ return scope.CloseAndEscape(result);
+ }
}
- return *args.receiver();
+ return scope.CloseAndEscape(args.receiver());
}
BUILTIN(HandleApiCall) {
- return HandleApiCallHelper<false>(args, isolate);
+ HandleScope scope(isolate);
+ DCHECK(!CalledAsConstructor(isolate));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ HandleApiCallHelper<false>(isolate, args));
+ return *result;
}
BUILTIN(HandleApiCallConstruct) {
- return HandleApiCallHelper<true>(args, isolate);
+ HandleScope scope(isolate);
+ DCHECK(CalledAsConstructor(isolate));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ HandleApiCallHelper<true>(isolate, args));
+ return *result;
+}
+
+
+namespace {
+
+class RelocatableArguments : public BuiltinArguments<NEEDS_CALLED_FUNCTION>,
+ public Relocatable {
+ public:
+ RelocatableArguments(Isolate* isolate, int length, Object** arguments)
+ : BuiltinArguments<NEEDS_CALLED_FUNCTION>(length, arguments),
+ Relocatable(isolate) {}
+
+ virtual inline void IterateInstance(ObjectVisitor* v) {
+ if (length() == 0) return;
+ v->VisitPointers(lowest_address(), highest_address() + 1);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RelocatableArguments);
+};
+
+} // namespace
+
+
+MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<JSFunction> function,
+ Handle<Object> receiver,
+ int argc,
+ Handle<Object> args[]) {
+ // Construct BuiltinArguments object: function, arguments reversed, receiver.
+ const int kBufferSize = 32;
+ Object* small_argv[kBufferSize];
+ Object** argv;
+ if (argc + 2 <= kBufferSize) {
+ argv = small_argv;
+ } else {
+ argv = new Object* [argc + 2];
+ }
+ argv[argc + 1] = *receiver;
+ for (int i = 0; i < argc; ++i) {
+ argv[argc - i] = *args[i];
+ }
+ argv[0] = *function;
+ MaybeHandle<Object> result;
+ {
+ auto isolate = function->GetIsolate();
+ RelocatableArguments arguments(isolate, argc + 2, &argv[argc + 1]);
+ result = HandleApiCallHelper<false>(isolate, arguments);
+ }
+ if (argv != small_argv) {
+ delete[] argv;
+ }
+ return result;
}
@@ -1183,10 +1186,13 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
// used to create the called object.
DCHECK(obj->map()->has_instance_call_handler());
JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
- DCHECK(constructor->shared()->IsApiFunction());
+ // TODO(ishell): turn this back to a DCHECK.
+ CHECK(constructor->shared()->IsApiFunction());
Object* handler =
constructor->shared()->get_api_func_data()->instance_call_handler();
DCHECK(!handler->IsUndefined());
+ // TODO(ishell): remove this debugging code.
+ CHECK(handler->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();
v8::FunctionCallback callback =
@@ -1268,8 +1274,8 @@ static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
}
-static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
- KeyedLoadIC::GenerateGeneric(masm);
+static void Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateMegamorphic(masm);
}
@@ -1313,16 +1319,6 @@ static void Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
-}
-
-
-static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, STRICT);
-}
-
-
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
KeyedStoreIC::GenerateMiss(masm);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 13a4b8027f..cfbb77d7a3 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -67,6 +67,7 @@ enum BuiltinExtraArguments {
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -87,7 +88,7 @@ enum BuiltinExtraArguments {
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
kNoExtraICState) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, kNoExtraICState) \
+ V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
\
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState) \
\
@@ -95,7 +96,6 @@ enum BuiltinExtraArguments {
V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
kNoExtraICState) \
V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
StoreIC::kStrictModeState) \
@@ -103,8 +103,6 @@ enum BuiltinExtraArguments {
StoreIC::kStrictModeState) \
V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
StoreIC::kStrictModeState) \
- V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
- StoreIC::kStrictModeState) \
V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \
kNoExtraICState) \
\
@@ -165,35 +163,36 @@ enum BuiltinExtraArguments {
DEBUG_BREAK)
// Define list of builtins implemented in JavaScript.
-#define BUILTINS_LIST_JS(V) \
- V(EQUALS, 1) \
- V(STRICT_EQUALS, 1) \
- V(COMPARE, 2) \
- V(ADD, 1) \
- V(SUB, 1) \
- V(MUL, 1) \
- V(DIV, 1) \
- V(MOD, 1) \
- V(BIT_OR, 1) \
- V(BIT_AND, 1) \
- V(BIT_XOR, 1) \
- V(SHL, 1) \
- V(SAR, 1) \
- V(SHR, 1) \
- V(DELETE, 2) \
- V(IN, 1) \
- V(INSTANCE_OF, 1) \
- V(FILTER_KEY, 1) \
- V(CALL_NON_FUNCTION, 0) \
- V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
+#define BUILTINS_LIST_JS(V) \
+ V(EQUALS, 1) \
+ V(STRICT_EQUALS, 1) \
+ V(COMPARE, 2) \
+ V(ADD, 1) \
+ V(SUB, 1) \
+ V(MUL, 1) \
+ V(DIV, 1) \
+ V(MOD, 1) \
+ V(BIT_OR, 1) \
+ V(BIT_AND, 1) \
+ V(BIT_XOR, 1) \
+ V(SHL, 1) \
+ V(SAR, 1) \
+ V(SHR, 1) \
+ V(DELETE, 2) \
+ V(IN, 1) \
+ V(INSTANCE_OF, 1) \
+ V(FILTER_KEY, 1) \
+ V(CALL_NON_FUNCTION, 0) \
+ V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
V(CALL_FUNCTION_PROXY, 1) \
V(CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, 1) \
- V(TO_OBJECT, 0) \
- V(TO_NUMBER, 0) \
- V(TO_STRING, 0) \
- V(STRING_ADD_LEFT, 1) \
- V(STRING_ADD_RIGHT, 1) \
- V(APPLY_PREPARE, 1) \
+ V(TO_OBJECT, 0) \
+ V(TO_NUMBER, 0) \
+ V(TO_STRING, 0) \
+ V(TO_NAME, 0) \
+ V(STRING_ADD_LEFT, 1) \
+ V(STRING_ADD_RIGHT, 1) \
+ V(APPLY_PREPARE, 1) \
V(STACK_OVERFLOW, 1)
class BuiltinFunctionTable;
@@ -274,11 +273,14 @@ class Builtins {
return names_[index];
}
static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
- Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
bool is_initialized() const { return initialized_; }
+ MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
+ Handle<JSFunction> function, Handle<Object> receiver, int argc,
+ Handle<Object> args[]);
+
private:
Builtins();
@@ -301,6 +303,7 @@ class Builtins {
static void Generate_CompileOptimized(MacroAssembler* masm);
static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
+ static void Generate_JSConstructStubForDerived(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index e5a4caa6c8..2871a66c64 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -4,85 +4,6 @@
#include "src/checks.h"
-#include "src/v8.h"
-
namespace v8 {
-namespace internal {
-
-intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
-
-} } // namespace v8::internal
-
-
-static bool CheckEqualsStrict(volatile double* exp, volatile double* val) {
- v8::internal::DoubleRepresentation exp_rep(*exp);
- v8::internal::DoubleRepresentation val_rep(*val);
- if (std::isnan(exp_rep.value) && std::isnan(val_rep.value)) return true;
- return exp_rep.bits == val_rep.bits;
-}
-
-
-void CheckEqualsHelper(const char* file, int line, const char* expected_source,
- double expected, const char* value_source,
- double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (!CheckEqualsStrict(exp, val)) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
- expected_source, value_source, *exp, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-void CheckNonEqualsHelper(const char* file, int line,
- const char* expected_source, double expected,
- const char* value_source, double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (CheckEqualsStrict(exp, val)) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
- expected_source, value_source, *exp, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- v8::Handle<v8::Value> expected,
- const char* value_source,
- v8::Handle<v8::Value> value) {
- if (!expected->Equals(value)) {
- v8::String::Utf8Value value_str(value);
- v8::String::Utf8Value expected_str(expected);
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
- expected_source, value_source, *expected_str, *value_str);
- }
-}
-
-
-void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- v8::Handle<v8::Value> unexpected,
- const char* value_source,
- v8::Handle<v8::Value> value) {
- if (unexpected->Equals(value)) {
- v8::String::Utf8Value value_str(value);
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
- unexpected_source, value_source, *value_str);
- }
-}
+namespace internal {} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 6ba64c1225..54ac92649b 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -5,6 +5,7 @@
#ifndef V8_CHECKS_H_
#define V8_CHECKS_H_
+#include "include/v8.h"
#include "src/base/logging.h"
namespace v8 {
@@ -14,8 +15,6 @@ template <class T> class Handle;
namespace internal {
-intptr_t HeapObjectTagMask();
-
#ifdef ENABLE_SLOW_DCHECKS
#define SLOW_DCHECK(condition) \
CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
@@ -27,30 +26,11 @@ const bool FLAG_enable_slow_asserts = false;
} } // namespace v8::internal
+#define DCHECK_TAG_ALIGNED(address) \
+ DCHECK((reinterpret_cast<intptr_t>(address) & \
+ ::v8::internal::kHeapObjectTagMask) == 0)
-void CheckNonEqualsHelper(const char* file, int line,
- const char* expected_source, double expected,
- const char* value_source, double value);
-
-void CheckEqualsHelper(const char* file, int line, const char* expected_source,
- double expected, const char* value_source, double value);
-
-void CheckNonEqualsHelper(const char* file, int line,
- const char* unexpected_source,
- v8::Handle<v8::Value> unexpected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- v8::Handle<v8::Value> expected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-#define DCHECK_TAG_ALIGNED(address) \
- DCHECK((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0)
-
-#define DCHECK_SIZE_TAG_ALIGNED(size) DCHECK((size & HeapObjectTagMask()) == 0)
+#define DCHECK_SIZE_TAG_ALIGNED(size) \
+ DCHECK((size & ::v8::internal::kHeapObjectTagMask) == 0)
#endif // V8_CHECKS_H_
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index e68d539207..590dbbb027 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -49,17 +49,35 @@ Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
// static
-Callable CodeFactory::StoreIC(Isolate* isolate, StrictMode mode) {
- return Callable(StoreIC::initialize_stub(isolate, mode),
+Callable CodeFactory::CallIC(Isolate* isolate, int argc,
+ CallICState::CallType call_type) {
+ return Callable(CallIC::initialize_stub(isolate, argc, call_type),
+ CallFunctionWithFeedbackDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
+ CallICState::CallType call_type) {
+ return Callable(
+ CallIC::initialize_stub_in_optimized_code(isolate, argc, call_type),
+ CallFunctionWithFeedbackAndVectorDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
+ return Callable(StoreIC::initialize_stub(isolate, language_mode),
StoreDescriptor(isolate));
}
// static
-Callable CodeFactory::KeyedStoreIC(Isolate* isolate, StrictMode mode) {
- Handle<Code> ic = mode == SLOPPY
- ? isolate->builtins()->KeyedStoreIC_Initialize()
- : isolate->builtins()->KeyedStoreIC_Initialize_Strict();
+Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
+ LanguageMode language_mode) {
+ Handle<Code> ic = is_strict(language_mode)
+ ? isolate->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate->builtins()->KeyedStoreIC_Initialize();
return Callable(ic, StoreDescriptor(isolate));
}
@@ -72,9 +90,8 @@ Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
// static
-Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
- OverwriteMode mode) {
- BinaryOpICStub stub(isolate, op, mode);
+Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
+ BinaryOpICStub stub(isolate, op);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index f26bf2a28e..5fd1646d52 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -36,13 +36,16 @@ class CodeFactory FINAL {
static Callable LoadICInOptimizedCode(Isolate* isolate, ContextualMode mode);
static Callable KeyedLoadIC(Isolate* isolate);
static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
- static Callable StoreIC(Isolate* isolate, StrictMode mode);
- static Callable KeyedStoreIC(Isolate* isolate, StrictMode mode);
+ static Callable CallIC(Isolate* isolate, int argc,
+ CallICState::CallType call_type);
+ static Callable CallICInOptimizedCode(Isolate* isolate, int argc,
+ CallICState::CallType call_type);
+ static Callable StoreIC(Isolate* isolate, LanguageMode mode);
+ static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
static Callable CompareIC(Isolate* isolate, Token::Value op);
- static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
- OverwriteMode mode = NO_OVERWRITE);
+ static Callable BinaryOpIC(Isolate* isolate, Token::Value op);
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 800a09dd22..f776abc043 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -58,7 +58,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
return arguments_length_;
}
CompilationInfo* info() { return info_; }
- HydrogenCodeStub* stub() { return info_->code_stub(); }
+ CodeStub* stub() { return info_->code_stub(); }
HContext* context() { return context_; }
Isolate* isolate() { return info_->isolate(); }
@@ -228,7 +228,7 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
IfBuilder builder(this);
builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
builder.Then();
- builder.ElseDeopt("Forced deopt to runtime");
+ builder.ElseDeopt(Deoptimizer::kForcedDeoptToRuntime);
return undefined;
}
@@ -369,7 +369,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
if_fixed_cow.End();
zero_capacity.End();
- checker.ElseDeopt("Uninitialized boilerplate literals");
+ checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateLiterals);
checker.End();
return environment()->Pop();
@@ -436,7 +436,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
environment()->Push(object);
- checker.ElseDeopt("Uninitialized boilerplate in fast clone");
+ checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
checker.End();
return environment()->Pop();
@@ -450,6 +450,10 @@ Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
+ // This stub is performance sensitive, the generated code must be tuned
+ // so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
+
HValue* size = Add<HConstant>(AllocationSite::kSize);
HInstruction* object = Add<HAllocate>(size, HType::JSObject(), TENURED,
JS_OBJECT_TYPE);
@@ -523,6 +527,36 @@ Handle<Code> CreateAllocationSiteStub::GenerateCode() {
template <>
+HValue* CodeStubGraphBuilder<CreateWeakCellStub>::BuildCodeStub() {
+ // This stub is performance sensitive, the generated code must be tuned
+ // so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
+
+ HValue* size = Add<HConstant>(WeakCell::kSize);
+ HInstruction* object =
+ Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE);
+
+ Handle<Map> weak_cell_map = isolate()->factory()->weak_cell_map();
+ AddStoreMapConstant(object, weak_cell_map);
+
+ HInstruction* value = GetParameter(CreateWeakCellDescriptor::kValueIndex);
+ Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellValue(), value);
+ Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellNext(),
+ graph()->GetConstantUndefined());
+
+ HInstruction* feedback_vector =
+ GetParameter(CreateWeakCellDescriptor::kVectorIndex);
+ HInstruction* slot = GetParameter(CreateWeakCellDescriptor::kSlotIndex);
+ Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ INITIALIZING_STORE);
+ return graph()->GetConstant0();
+}
+
+
+Handle<Code> CreateWeakCellStub::GenerateCode() { return DoGenerateCode(this); }
+
+
+template <>
HValue* CodeStubGraphBuilder<LoadScriptContextFieldStub>::BuildCodeStub() {
int context_index = casted_stub()->context_index();
int slot_index = casted_stub()->slot_index();
@@ -635,7 +669,7 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
NEVER_RETURN_HOLE);
}
- in_unmapped_range.ElseDeopt("Outside of range");
+ in_unmapped_range.ElseDeopt(Deoptimizer::kOutsideOfRange);
in_unmapped_range.End();
return result;
}
@@ -676,7 +710,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
IfBuilder positive_smi(this);
positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
Token::LT);
- positive_smi.ThenDeopt("key is negative");
+ positive_smi.ThenDeopt(Deoptimizer::kKeyIsNegative);
positive_smi.End();
HValue* constant_two = Add<HConstant>(2);
@@ -702,7 +736,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
// smi check is being emitted.
HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
nullptr, FAST_ELEMENTS);
- DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
+ STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
HValue* result = Add<HLoadKeyed>(the_context, mapped_index, nullptr,
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
environment()->Push(result);
@@ -1196,21 +1230,6 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
result = EnforceNumberType(result, result_type);
}
- // Reuse the double box of one of the operands if we are allowed to (i.e.
- // chained binops).
- if (state.CanReuseDoubleBox()) {
- HValue* operand = (state.mode() == OVERWRITE_LEFT) ? left : right;
- IfBuilder if_heap_number(this);
- if_heap_number.If<HHasInstanceTypeAndBranch>(operand, HEAP_NUMBER_TYPE);
- if_heap_number.Then();
- Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result);
- Push(operand);
- if_heap_number.Else();
- Push(result);
- if_heap_number.End();
- result = Pop();
- }
-
return result;
}
@@ -1310,22 +1329,31 @@ Handle<Code> ToBooleanStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
StoreGlobalStub* stub = casted_stub();
- Handle<Object> placeholer_value(Smi::FromInt(0), isolate());
- Handle<PropertyCell> placeholder_cell =
- isolate()->factory()->NewPropertyCell(placeholer_value);
-
HParameter* value = GetParameter(StoreDescriptor::kValueIndex);
-
if (stub->check_global()) {
// Check that the map of the global has not changed: use a placeholder map
// that will be replaced later with the global object's map.
+ HParameter* proxy = GetParameter(StoreDescriptor::kReceiverIndex);
+ HValue* proxy_map =
+ Add<HLoadNamedField>(proxy, nullptr, HObjectAccess::ForMap());
+ HValue* global =
+ Add<HLoadNamedField>(proxy_map, nullptr, HObjectAccess::ForPrototype());
Handle<Map> placeholder_map = isolate()->factory()->meta_map();
- HValue* global = Add<HConstant>(
- StoreGlobalStub::global_placeholder(isolate()));
- Add<HCheckMaps>(global, placeholder_map);
+ HValue* cell = Add<HConstant>(Map::WeakCellForMap(placeholder_map));
+ HValue* expected_map =
+ Add<HLoadNamedField>(cell, nullptr, HObjectAccess::ForWeakCellValue());
+ HValue* map =
+ Add<HLoadNamedField>(global, nullptr, HObjectAccess::ForMap());
+ IfBuilder map_check(this);
+ map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
+ map_check.ThenDeopt(Deoptimizer::kUnknownMap);
+ map_check.End();
}
- HValue* cell = Add<HConstant>(placeholder_cell);
+ HValue* weak_cell = Add<HConstant>(isolate()->factory()->NewWeakCell(
+ StoreGlobalStub::property_cell_placeholder(isolate())));
+ HValue* cell = Add<HLoadNamedField>(weak_cell, nullptr,
+ HObjectAccess::ForWeakCellValue());
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
HValue* cell_contents = Add<HLoadNamedField>(cell, nullptr, access);
@@ -1333,7 +1361,8 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
IfBuilder builder(this);
builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
builder.Then();
- builder.ElseDeopt("Unexpected cell contents in constant global store");
+ builder.ElseDeopt(
+ Deoptimizer::kUnexpectedCellContentsInConstantGlobalStore);
builder.End();
} else {
// Load the payload of the global parameter cell. A hole indicates that the
@@ -1343,9 +1372,10 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
HValue* hole_value = graph()->GetConstantHole();
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
- builder.Deopt("Unexpected cell contents in global store");
+ builder.Deopt(Deoptimizer::kUnexpectedCellContentsInGlobalStore);
builder.Else();
- Add<HStoreNamedField>(cell, access, value);
+ HStoreNamedField* store = Add<HStoreNamedField>(cell, access, value);
+ store->MarkReceiverAsCell();
builder.End();
}
@@ -1367,7 +1397,8 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- Add<HDeoptimize>("Tracing elements transitions", Deoptimizer::EAGER);
+ Add<HDeoptimize>(Deoptimizer::kTracingElementsTransitions,
+ Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
@@ -1556,7 +1587,7 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
HInstruction* js_function =
Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE);
- int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
+ int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->kind());
// Compute the function map in the current native context and set that
@@ -1830,7 +1861,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
BuildElementsKindLimitCheck(&kind_if, bit_field2,
SLOPPY_ARGUMENTS_ELEMENTS);
// Non-strict elements are not handled.
- Add<HDeoptimize>("non-strict elements in KeyedLoadGenericStub",
+ Add<HDeoptimize>(Deoptimizer::kNonStrictElementsInKeyedLoadGenericStub,
Deoptimizer::EAGER);
Push(graph()->GetConstant0());
@@ -1870,7 +1901,8 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
EXTERNAL_UINT8_CLAMPED_ELEMENTS);
- kind_if.ElseDeopt("ElementsKind unhandled in KeyedLoadGenericStub");
+ kind_if.ElseDeopt(
+ Deoptimizer::kElementsKindUnhandledInKeyedLoadGenericStub);
kind_if.End();
}
@@ -1907,6 +1939,9 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
}
if_dict_properties.Else();
{
+ // TODO(dcarney): don't use keyed lookup cache, but convert to use
+ // megamorphic stub cache.
+ UNREACHABLE();
// Key is string, properties are fast mode
HValue* hash = BuildKeyedLookupCacheHash(receiver, key);
@@ -2033,51 +2068,56 @@ void CodeStubGraphBuilderBase::HandleArrayCases(HValue* array, HValue* receiver,
HValue* name, HValue* slot,
HValue* vector,
bool keyed_load) {
+ HConstant* constant_two = Add<HConstant>(2);
+ HConstant* constant_three = Add<HConstant>(3);
+
IfBuilder if_receiver_heap_object(this);
if_receiver_heap_object.IfNot<HIsSmiAndBranch>(receiver);
if_receiver_heap_object.Then();
+ Push(AddLoadMap(receiver, nullptr));
+ if_receiver_heap_object.Else();
+ HConstant* heap_number_map =
+ Add<HConstant>(isolate()->factory()->heap_number_map());
+ Push(heap_number_map);
+ if_receiver_heap_object.End();
+ HValue* receiver_map = Pop();
+
+ HValue* start =
+ keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
+ HValue* weak_cell =
+ Add<HLoadKeyed>(array, start, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ // Load the weak cell value. It may be Smi(0), or a map. Compare nonetheless
+ // against the receiver_map.
+ HValue* array_map = Add<HLoadNamedField>(weak_cell, nullptr,
+ HObjectAccess::ForWeakCellValue());
+
+ IfBuilder if_correct_map(this);
+ if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
+ if_correct_map.Then();
+ { TailCallHandler(receiver, name, array, start, slot, vector); }
+ if_correct_map.Else();
{
- HConstant* constant_two = Add<HConstant>(2);
- HConstant* constant_three = Add<HConstant>(3);
-
- HValue* receiver_map = AddLoadMap(receiver, nullptr);
- HValue* start =
- keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
- HValue* weak_cell = Add<HLoadKeyed>(array, start, nullptr, FAST_ELEMENTS,
- ALLOW_RETURN_HOLE);
- // Load the weak cell value. It may be Smi(0), or a map. Compare nonetheless
- // against the receiver_map.
- HValue* array_map = Add<HLoadNamedField>(weak_cell, nullptr,
- HObjectAccess::ForWeakCellValue());
-
- IfBuilder if_correct_map(this);
- if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
- if_correct_map.Then();
- { TailCallHandler(receiver, name, array, start, slot, vector); }
- if_correct_map.Else();
+ // If our array has more elements, the ic is polymorphic. Look for the
+ // receiver map in the rest of the array.
+ HValue* length = AddLoadFixedArrayLength(array, nullptr);
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
+ constant_two);
+ start = keyed_load ? constant_three : constant_two;
+ HValue* key = builder.BeginBody(start, length, Token::LT);
{
- // If our array has more elements, the ic is polymorphic. Look for the
- // receiver map in the rest of the array.
- HValue* length = AddLoadFixedArrayLength(array, nullptr);
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
- constant_two);
- start = keyed_load ? constant_three : constant_two;
- HValue* key = builder.BeginBody(start, length, Token::LT);
- {
- HValue* weak_cell = Add<HLoadKeyed>(array, key, nullptr, FAST_ELEMENTS,
- ALLOW_RETURN_HOLE);
- HValue* array_map = Add<HLoadNamedField>(
- weak_cell, nullptr, HObjectAccess::ForWeakCellValue());
- IfBuilder if_correct_poly_map(this);
- if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
- array_map);
- if_correct_poly_map.Then();
- { TailCallHandler(receiver, name, array, key, slot, vector); }
- }
- builder.EndBody();
+ HValue* weak_cell = Add<HLoadKeyed>(array, key, nullptr, FAST_ELEMENTS,
+ ALLOW_RETURN_HOLE);
+ HValue* array_map = Add<HLoadNamedField>(
+ weak_cell, nullptr, HObjectAccess::ForWeakCellValue());
+ IfBuilder if_correct_poly_map(this);
+ if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
+ array_map);
+ if_correct_poly_map.Then();
+ { TailCallHandler(receiver, name, array, key, slot, vector); }
}
- if_correct_map.End();
+ builder.EndBody();
}
+ if_correct_map.End();
}
@@ -2163,15 +2203,16 @@ HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
}
array_checker.Else();
{
- // Check if the IC is in generic state.
- IfBuilder generic_checker(this);
- HConstant* generic_symbol =
- Add<HConstant>(isolate()->factory()->generic_symbol());
- generic_checker.If<HCompareObjectEqAndBranch>(feedback, generic_symbol);
- generic_checker.Then();
+ // Check if the IC is in megamorphic state.
+ IfBuilder megamorphic_checker(this);
+ HConstant* megamorphic_symbol =
+ Add<HConstant>(isolate()->factory()->megamorphic_symbol());
+ megamorphic_checker.If<HCompareObjectEqAndBranch>(feedback,
+ megamorphic_symbol);
+ megamorphic_checker.Then();
{
- // Tail-call to the generic KeyedLoadIC, treating it like a handler.
- Handle<Code> stub = KeyedLoadIC::generic_stub(isolate());
+ // Tail-call to the megamorphic KeyedLoadIC, treating it like a handler.
+ Handle<Code> stub = KeyedLoadIC::ChooseMegamorphicStub(isolate());
HValue* constant_stub = Add<HConstant>(stub);
LoadDescriptor descriptor(isolate());
HValue* op_vals[] = {context(), receiver, name};
@@ -2179,7 +2220,7 @@ HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
Vector<HValue*>(op_vals, 3), TAIL_CALL);
// We never return here, it is a tail call.
}
- generic_checker.End();
+ megamorphic_checker.End();
}
array_checker.End();
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 895569d413..6c68271bcd 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -81,6 +81,9 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
CodeCreateEvent(Logger::STUB_TAG, *code, os.str().c_str()));
Counters* counters = isolate()->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
+#ifdef DEBUG
+ code->VerifyEmbeddedObjects();
+#endif
}
@@ -265,12 +268,8 @@ MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
// Generate the uninitialized versions of the stub.
for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
- for (int mode = NO_OVERWRITE; mode <= OVERWRITE_RIGHT; ++mode) {
- BinaryOpICStub stub(isolate,
- static_cast<Token::Value>(op),
- static_cast<OverwriteMode>(mode));
- stub.GetCode();
- }
+ BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
+ stub.GetCode();
}
// Generate special versions of the stub.
@@ -679,6 +678,9 @@ void FastCloneShallowObjectStub::InitializeDescriptor(
void CreateAllocationSiteStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+void CreateWeakCellStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
@@ -738,6 +740,12 @@ void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
}
+void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
+ CreateWeakCellStub stub(isolate);
+ stub.GetCode();
+}
+
+
void StoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind()) {
case FAST_ELEMENTS:
@@ -782,6 +790,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
}
+void RestParamAccessStub::Generate(MacroAssembler* masm) {
+ GenerateNew(masm);
+}
+
+
void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
os << "ArgumentsAccessStub_";
switch (type()) {
@@ -802,6 +815,11 @@ void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
}
+void RestParamAccessStub::PrintName(std::ostream& os) const { // NOLINT
+ os << "RestParamAccessStub_";
+}
+
+
void CallFunctionStub::PrintName(std::ostream& os) const { // NOLINT
os << "CallFunctionStub_Args" << argc();
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 8448e557f2..2ae4ba7085 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -24,6 +24,7 @@ namespace internal {
V(ArrayConstructor) \
V(BinaryOpICWithAllocationSite) \
V(CallApiFunction) \
+ V(CallApiAccessor) \
V(CallApiGetter) \
V(CallConstruct) \
V(CallFunction) \
@@ -38,6 +39,8 @@ namespace internal {
V(JSEntry) \
V(KeyedLoadICTrampoline) \
V(LoadICTrampoline) \
+ V(CallICTrampoline) \
+ V(CallIC_ArrayTrampoline) \
V(LoadIndexedInterceptor) \
V(LoadIndexedString) \
V(MathPow) \
@@ -60,6 +63,7 @@ namespace internal {
V(BinaryOpWithAllocationSite) \
V(CompareNilIC) \
V(CreateAllocationSite) \
+ V(CreateWeakCell) \
V(ElementsTransitionAndStore) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
@@ -90,7 +94,8 @@ namespace internal {
V(StoreField) \
V(StoreGlobal) \
V(StoreTransition) \
- V(StringLength)
+ V(StringLength) \
+ V(RestParamAccess)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -111,6 +116,16 @@ namespace internal {
#define CODE_STUB_LIST_ARM64(V)
#endif
+// List of code stubs only used on PPC platforms.
+#ifdef V8_TARGET_ARCH_PPC
+#define CODE_STUB_LIST_PPC(V) \
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
+#else
+#define CODE_STUB_LIST_PPC(V)
+#endif
+
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
@@ -127,10 +142,11 @@ namespace internal {
#endif
// Combined list of code stubs.
-#define CODE_STUB_LIST(V) \
- CODE_STUB_LIST_ALL_PLATFORMS(V) \
- CODE_STUB_LIST_ARM(V) \
- CODE_STUB_LIST_ARM64(V) \
+#define CODE_STUB_LIST(V) \
+ CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ CODE_STUB_LIST_ARM(V) \
+ CODE_STUB_LIST_ARM64(V) \
+ CODE_STUB_LIST_PPC(V) \
CODE_STUB_LIST_MIPS(V)
// Stub is base classes of all stubs.
@@ -198,11 +214,12 @@ class CodeStub BASE_EMBEDDED {
virtual Major MajorKey() const = 0;
uint32_t MinorKey() const { return minor_key_; }
+ // BinaryOpStub needs to override this.
+ virtual Code::Kind GetCodeKind() const;
+
virtual InlineCacheState GetICState() const { return UNINITIALIZED; }
virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
- virtual Code::StubType GetStubType() {
- return Code::NORMAL;
- }
+ virtual Code::StubType GetStubType() const { return Code::NORMAL; }
friend std::ostream& operator<<(std::ostream& os, const CodeStub& s) {
s.PrintName(os);
@@ -246,9 +263,6 @@ class CodeStub BASE_EMBEDDED {
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
- // BinaryOpStub needs to override this.
- virtual Code::Kind GetCodeKind() const;
-
// Add the code to a specialized cache, specific to an individual
// stub type. Please note, this method must add the code object to a
// roots object, otherwise we will remove the code during GC.
@@ -281,6 +295,30 @@ class CodeStub BASE_EMBEDDED {
};
+// TODO(svenpanne) This class is only used to construct a more or less sensible
+// CompilationInfo for testing purposes, basically pretending that we are
+// currently compiling some kind of code stub. Remove this when the pipeline and
+// testing machinery is restructured in such a way that we don't have to come up
+// with a CompilationInfo out of thin air, although we only need a few parts of
+// it.
+struct FakeStubForTesting : public CodeStub {
+ explicit FakeStubForTesting(Isolate* isolate) : CodeStub(isolate) {}
+
+ // Only used by pipeline.cc's GetDebugName in DEBUG mode.
+ Major MajorKey() const OVERRIDE { return CodeStub::NoCache; }
+
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+ UNREACHABLE();
+ return CallInterfaceDescriptor();
+ }
+
+ Handle<Code> GenerateCode() OVERRIDE {
+ UNREACHABLE();
+ return Handle<Code>();
+ }
+};
+
+
#define DEFINE_CODE_STUB_BASE(NAME, SUPER) \
public: \
NAME(uint32_t key, Isolate* isolate) : SUPER(key, isolate) {} \
@@ -504,6 +542,8 @@ class RuntimeCallHelper {
#include "src/arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/code-stubs-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/code-stubs-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/code-stubs-mips.h"
#elif V8_TARGET_ARCH_MIPS64
@@ -555,29 +595,26 @@ class NumberToStringStub FINAL : public HydrogenCodeStub {
class FastNewClosureStub : public HydrogenCodeStub {
public:
- FastNewClosureStub(Isolate* isolate, StrictMode strict_mode,
+ FastNewClosureStub(Isolate* isolate, LanguageMode language_mode,
FunctionKind kind)
: HydrogenCodeStub(isolate) {
DCHECK(IsValidFunctionKind(kind));
- set_sub_minor_key(StrictModeBits::encode(strict_mode) |
+ set_sub_minor_key(LanguageModeBits::encode(language_mode) |
FunctionKindBits::encode(kind));
}
- StrictMode strict_mode() const {
- return StrictModeBits::decode(sub_minor_key());
+ LanguageMode language_mode() const {
+ return LanguageModeBits::decode(sub_minor_key());
}
FunctionKind kind() const {
return FunctionKindBits::decode(sub_minor_key());
}
- bool is_arrow() const { return IsArrowFunction(kind()); }
- bool is_generator() const { return IsGeneratorFunction(kind()); }
- bool is_concise_method() const { return IsConciseMethod(kind()); }
- bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
private:
- class StrictModeBits : public BitField<StrictMode, 0, 1> {};
- class FunctionKindBits : public BitField<FunctionKind, 1, 4> {};
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ class LanguageModeBits : public BitField<LanguageMode, 0, 2> {};
+ class FunctionKindBits : public BitField<FunctionKind, 2, 7> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
@@ -660,6 +697,17 @@ class CreateAllocationSiteStub : public HydrogenCodeStub {
};
+class CreateWeakCellStub : public HydrogenCodeStub {
+ public:
+ explicit CreateWeakCellStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateWeakCell);
+ DEFINE_HYDROGEN_CODE_STUB(CreateWeakCell, HydrogenCodeStub);
+};
+
+
class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
@@ -815,7 +863,7 @@ class CallICStub: public PlatformCodeStub {
private:
void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedbackAndVector);
DEFINE_PLATFORM_CODE_STUB(CallIC, PlatformCodeStub);
};
@@ -863,7 +911,7 @@ class LoadIndexedInterceptorStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
@@ -876,7 +924,7 @@ class LoadIndexedStringStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
@@ -916,7 +964,7 @@ class LoadFieldStub: public HandlerStub {
protected:
Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
private:
class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -932,7 +980,7 @@ class KeyedLoadSloppyArgumentsStub : public HandlerStub {
protected:
Code::Kind kind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
private:
DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
@@ -952,7 +1000,7 @@ class LoadConstantStub : public HandlerStub {
protected:
Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
private:
class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
@@ -967,7 +1015,7 @@ class StringLengthStub: public HandlerStub {
protected:
Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
DEFINE_HANDLER_CODE_STUB(StringLength, HandlerStub);
};
@@ -996,7 +1044,7 @@ class StoreFieldStub : public HandlerStub {
protected:
Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -1049,7 +1097,7 @@ class StoreTransitionStub : public HandlerStub {
protected:
Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -1068,7 +1116,7 @@ class StoreGlobalStub : public HandlerStub {
CheckGlobalBits::encode(check_global));
}
- static Handle<HeapObject> global_placeholder(Isolate* isolate) {
+ static Handle<HeapObject> property_cell_placeholder(Isolate* isolate) {
return isolate->factory()->uninitialized_value();
}
@@ -1076,13 +1124,15 @@ class StoreGlobalStub : public HandlerStub {
Handle<PropertyCell> cell) {
if (check_global()) {
Code::FindAndReplacePattern pattern;
- pattern.Add(Handle<Map>(global_placeholder(isolate())->map()), global);
- pattern.Add(isolate()->factory()->meta_map(), Handle<Map>(global->map()));
- pattern.Add(isolate()->factory()->global_property_cell_map(), cell);
+ pattern.Add(isolate()->factory()->meta_map(),
+ Map::WeakCellForMap(Handle<Map>(global->map())));
+ pattern.Add(Handle<Map>(property_cell_placeholder(isolate())->map()),
+ isolate()->factory()->NewWeakCell(cell));
return CodeStub::GetCodeCopy(pattern);
} else {
Code::FindAndReplacePattern pattern;
- pattern.Add(isolate()->factory()->global_property_cell_map(), cell);
+ pattern.Add(Handle<Map>(property_cell_placeholder(isolate())->map()),
+ isolate()->factory()->NewWeakCell(cell));
return CodeStub::GetCodeCopy(pattern);
}
}
@@ -1117,14 +1167,40 @@ class StoreGlobalStub : public HandlerStub {
class CallApiFunctionStub : public PlatformCodeStub {
public:
- CallApiFunctionStub(Isolate* isolate,
- bool is_store,
- bool call_data_undefined,
- int argc) : PlatformCodeStub(isolate) {
+ explicit CallApiFunctionStub(Isolate* isolate, bool call_data_undefined)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = CallDataUndefinedBits::encode(call_data_undefined);
+ }
+
+ private:
+ bool call_data_undefined() const {
+ return CallDataUndefinedBits::decode(minor_key_);
+ }
+
+ class CallDataUndefinedBits : public BitField<bool, 0, 1> {};
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiFunction);
+ DEFINE_PLATFORM_CODE_STUB(CallApiFunction, PlatformCodeStub);
+};
+
+
+class CallApiAccessorStub : public PlatformCodeStub {
+ public:
+ CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined)
+ : PlatformCodeStub(isolate) {
minor_key_ = IsStoreBits::encode(is_store) |
CallDataUndefinedBits::encode(call_data_undefined) |
+ ArgumentBits::encode(is_store ? 1 : 0);
+ }
+
+ protected:
+ // For CallApiFunctionWithFixedArgsStub, see below.
+ static const int kArgBits = 3;
+ CallApiAccessorStub(Isolate* isolate, int argc, bool call_data_undefined)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = IsStoreBits::encode(false) |
+ CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(argc);
- DCHECK(!is_store || argc == 1);
}
private:
@@ -1136,14 +1212,31 @@ class CallApiFunctionStub : public PlatformCodeStub {
class IsStoreBits: public BitField<bool, 0, 1> {};
class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
- class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
- STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits);
+ class ArgumentBits : public BitField<int, 2, kArgBits> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiFunction);
- DEFINE_PLATFORM_CODE_STUB(CallApiFunction, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiAccessor);
+ DEFINE_PLATFORM_CODE_STUB(CallApiAccessor, PlatformCodeStub);
};
+// TODO(dcarney): see if it's possible to remove this later without performance
+// degradation.
+// This is not a real stub, but a way of generating the CallApiAccessorStub
+// (which has the same abi) which makes it clear that it is not an accessor.
+class CallApiFunctionWithFixedArgsStub : public CallApiAccessorStub {
+ public:
+ static const int kMaxFixedArgs = (1 << kArgBits) - 1;
+ CallApiFunctionWithFixedArgsStub(Isolate* isolate, int argc,
+ bool call_data_undefined)
+ : CallApiAccessorStub(isolate, argc, call_data_undefined) {
+ DCHECK(0 <= argc && argc <= kMaxFixedArgs);
+ }
+};
+
+
+typedef ApiAccessorDescriptor ApiFunctionWithFixedArgsDescriptor;
+
+
class CallApiGetterStub : public PlatformCodeStub {
public:
explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -1155,10 +1248,9 @@ class CallApiGetterStub : public PlatformCodeStub {
class BinaryOpICStub : public HydrogenCodeStub {
public:
- BinaryOpICStub(Isolate* isolate, Token::Value op,
- OverwriteMode mode = NO_OVERWRITE)
+ BinaryOpICStub(Isolate* isolate, Token::Value op)
: HydrogenCodeStub(isolate, UNINITIALIZED) {
- BinaryOpICState state(isolate, op, mode);
+ BinaryOpICState state(isolate, op);
set_sub_minor_key(state.GetExtraICState());
}
@@ -1239,10 +1331,8 @@ class BinaryOpICWithAllocationSiteStub FINAL : public PlatformCodeStub {
class BinaryOpWithAllocationSiteStub FINAL : public BinaryOpICStub {
public:
- BinaryOpWithAllocationSiteStub(Isolate* isolate,
- Token::Value op,
- OverwriteMode mode)
- : BinaryOpICStub(isolate, op, mode) {}
+ BinaryOpWithAllocationSiteStub(Isolate* isolate, Token::Value op)
+ : BinaryOpICStub(isolate, op) {}
BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
: BinaryOpICStub(isolate, state) {}
@@ -1460,7 +1550,7 @@ class CEntryStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {
minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs);
DCHECK(result_size == 1 || result_size == 2);
-#ifdef _WIN64
+#if _WIN64 || V8_TARGET_ARCH_PPC
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
#endif // _WIN64
}
@@ -1473,7 +1563,7 @@ class CEntryStub : public PlatformCodeStub {
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
-#ifdef _WIN64
+#if _WIN64 || V8_TARGET_ARCH_PPC
int result_size() const { return ResultSizeBits::decode(minor_key_); }
#endif // _WIN64
@@ -1525,8 +1615,13 @@ class ArgumentsAccessStub: public PlatformCodeStub {
NEW_STRICT
};
- ArgumentsAccessStub(Isolate* isolate, Type type) : PlatformCodeStub(isolate) {
- minor_key_ = TypeBits::encode(type);
+ enum HasNewTarget { NO_NEW_TARGET, HAS_NEW_TARGET };
+
+ ArgumentsAccessStub(Isolate* isolate, Type type,
+ HasNewTarget has_new_target = NO_NEW_TARGET)
+ : PlatformCodeStub(isolate) {
+ minor_key_ =
+ TypeBits::encode(type) | HasNewTargetBits::encode(has_new_target);
}
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
@@ -1538,6 +1633,9 @@ class ArgumentsAccessStub: public PlatformCodeStub {
private:
Type type() const { return TypeBits::decode(minor_key_); }
+ bool has_new_target() const {
+ return HasNewTargetBits::decode(minor_key_) == HAS_NEW_TARGET;
+ }
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewStrict(MacroAssembler* masm);
@@ -1547,11 +1645,29 @@ class ArgumentsAccessStub: public PlatformCodeStub {
void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
class TypeBits : public BitField<Type, 0, 2> {};
+ class HasNewTargetBits : public BitField<HasNewTarget, 2, 1> {};
DEFINE_PLATFORM_CODE_STUB(ArgumentsAccess, PlatformCodeStub);
};
+class RestParamAccessStub: public PlatformCodeStub {
+ public:
+ explicit RestParamAccessStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
+
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+ return ContextOnlyDescriptor(isolate());
+ }
+
+ private:
+ void GenerateNew(MacroAssembler* masm);
+
+ virtual void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
+
+ DEFINE_PLATFORM_CODE_STUB(RestParamAccess, PlatformCodeStub);
+};
+
+
class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
@@ -1628,9 +1744,13 @@ class CallConstructStub: public PlatformCodeStub {
return (flags() & RECORD_CONSTRUCTOR_TARGET) != 0;
}
+ bool IsSuperConstructorCall() const {
+ return (flags() & SUPER_CONSTRUCTOR_CALL) != 0;
+ }
+
void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
- class FlagBits : public BitField<CallConstructorFlags, 0, 1> {};
+ class FlagBits : public BitField<CallConstructorFlags, 0, 2> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallConstruct);
DEFINE_PLATFORM_CODE_STUB(CallConstruct, PlatformCodeStub);
@@ -1881,6 +2001,41 @@ class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
};
+class CallICTrampolineStub : public PlatformCodeStub {
+ public:
+ CallICTrampolineStub(Isolate* isolate, const CallICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
+ }
+
+ Code::Kind GetCodeKind() const OVERRIDE { return Code::CALL_IC; }
+
+ InlineCacheState GetICState() const FINAL { return DEFAULT; }
+
+ ExtraICState GetExtraICState() const FINAL {
+ return static_cast<ExtraICState>(minor_key_);
+ }
+
+ protected:
+ CallICState state() const {
+ return CallICState(static_cast<ExtraICState>(minor_key_));
+ }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
+ DEFINE_PLATFORM_CODE_STUB(CallICTrampoline, PlatformCodeStub);
+};
+
+
+class CallIC_ArrayTrampolineStub : public CallICTrampolineStub {
+ public:
+ CallIC_ArrayTrampolineStub(Isolate* isolate, const CallICState& state)
+ : CallICTrampolineStub(isolate, state) {}
+
+ private:
+ DEFINE_PLATFORM_CODE_STUB(CallIC_ArrayTrampoline, CallICTrampolineStub);
+};
+
+
class MegamorphicLoadStub : public HydrogenCodeStub {
public:
MegamorphicLoadStub(Isolate* isolate, const LoadICState& state)
@@ -2017,7 +2172,7 @@ class ScriptContextFieldStub : public HandlerStub {
class SlotIndexBits
: public BitField<int, kContextIndexBits, kSlotIndexBits> {};
- Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, HandlerStub);
};
diff --git a/deps/v8/src/code.h b/deps/v8/src/code.h
index d0a5fec61f..a0639e8deb 100644
--- a/deps/v8/src/code.h
+++ b/deps/v8/src/code.h
@@ -24,7 +24,8 @@ class ParameterCount BASE_EMBEDDED {
explicit ParameterCount(int immediate)
: reg_(no_reg), immediate_(immediate) { }
explicit ParameterCount(Handle<JSFunction> f)
- : reg_(no_reg), immediate_(f->shared()->formal_parameter_count()) { }
+ : reg_(no_reg),
+ immediate_(f->shared()->internal_formal_parameter_count()) {}
bool is_reg() const { return !reg_.is(no_reg); }
bool is_immediate() const { return !is_reg(); }
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 627e8362e6..178ba4a69d 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -4,6 +4,9 @@
#include "src/v8.h"
+#if defined(V8_OS_AIX)
+#include <fenv.h>
+#endif
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compiler.h"
@@ -48,7 +51,15 @@ double modulo(double x, double y) {
#else // POSIX
double modulo(double x, double y) {
+#if defined(V8_OS_AIX)
+ // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE)
+ feclearexcept(FE_ALL_EXCEPT);
+ double result = std::fmod(x, y);
+ int exception = fetestexcept(FE_UNDERFLOW);
+ return (exception ? x : result);
+#else
return std::fmod(x, y);
+#endif
}
#endif // defined(_WIN64)
@@ -125,12 +136,13 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(info->zone()).PrintProgram(info->function()));
+ PrettyPrinter(info->isolate(), info->zone())
+ .PrintProgram(info->function()));
}
if (!info->IsStub() && print_ast) {
- PrintF("--- AST ---\n%s\n",
- AstPrinter(info->zone()).PrintProgram(info->function()));
+ PrintF("--- AST ---\n%s\n", AstPrinter(info->isolate(), info->zone())
+ .PrintProgram(info->function()));
}
#endif // DEBUG
}
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index ba99a404a3..0e0cf1d294 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -53,6 +53,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "src/arm64/codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/codegen-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 6c9f95a985..b696ea5507 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -111,12 +111,11 @@ CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(
- Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin) {
+bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
+ Handle<Object> name, int line_offset,
+ int column_offset,
+ bool is_embedder_debug_script,
+ bool is_shared_cross_origin) {
Handle<Script> script =
Handle<Script>(Script::cast(function_info->script()), isolate());
// If the script name isn't set, the boilerplate script should have
@@ -129,6 +128,10 @@ bool CompilationCacheScript::HasOrigin(
if (column_offset != script->column_offset()->value()) return false;
// Check that both names are strings. If not, no match.
if (!name->IsString() || !script->name()->IsString()) return false;
+ // Were both scripts tagged by the embedder as being internal script?
+ if (is_embedder_debug_script != script->is_embedder_debug_script()) {
+ return false;
+ }
// Were both scripts tagged by the embedder as being shared cross-origin?
if (is_shared_cross_origin != script->is_shared_cross_origin()) return false;
// Compare the two name strings for equality.
@@ -142,12 +145,10 @@ bool CompilationCacheScript::HasOrigin(
// will be cached, but subsequent code from different source / line
// won't.
Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
- Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context) {
+ Handle<String> source, Handle<Object> name, int line_offset,
+ int column_offset, bool is_embedder_debug_script,
+ bool is_shared_cross_origin, Handle<Context> context,
+ LanguageMode language_mode) {
Object* result = NULL;
int generation;
@@ -156,17 +157,14 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe = table->Lookup(source, context);
+ Handle<Object> probe = table->Lookup(source, context, language_mode);
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
// Break when we've found a suitable shared function info that
// matches the origin.
- if (HasOrigin(function_info,
- name,
- line_offset,
- column_offset,
- is_shared_cross_origin)) {
+ if (HasOrigin(function_info, name, line_offset, column_offset,
+ is_embedder_debug_script, is_shared_cross_origin)) {
result = *function_info;
break;
}
@@ -180,14 +178,11 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
if (result != NULL) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
isolate());
- DCHECK(HasOrigin(shared,
- name,
- line_offset,
- column_offset,
- is_shared_cross_origin));
+ DCHECK(HasOrigin(shared, name, line_offset, column_offset,
+ is_embedder_debug_script, is_shared_cross_origin));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
- if (generation != 0) Put(source, context, shared);
+ if (generation != 0) Put(source, context, language_mode, shared);
isolate()->counters()->compilation_cache_hits()->Increment();
return shared;
} else {
@@ -199,17 +194,18 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
void CompilationCacheScript::Put(Handle<String> source,
Handle<Context> context,
+ LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(
- CompilationCacheTable::Put(table, source, context, function_info));
+ SetFirstTable(CompilationCacheTable::Put(table, source, context,
+ language_mode, function_info));
}
MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- StrictMode strict_mode, int scope_position) {
+ LanguageMode language_mode, int scope_position) {
HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
@@ -218,7 +214,8 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup(
int generation;
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(source, outer_info, strict_mode, scope_position);
+ result =
+ table->LookupEval(source, outer_info, language_mode, scope_position);
if (result->IsSharedFunctionInfo()) break;
}
if (result->IsSharedFunctionInfo()) {
@@ -295,31 +292,30 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
- Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context) {
+ Handle<String> source, Handle<Object> name, int line_offset,
+ int column_offset, bool is_embedder_debug_script,
+ bool is_shared_cross_origin, Handle<Context> context,
+ LanguageMode language_mode) {
if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
return script_.Lookup(source, name, line_offset, column_offset,
- is_shared_cross_origin, context);
+ is_embedder_debug_script, is_shared_cross_origin,
+ context, language_mode);
}
MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, StrictMode strict_mode, int scope_position) {
+ Handle<Context> context, LanguageMode language_mode, int scope_position) {
if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
MaybeHandle<SharedFunctionInfo> result;
if (context->IsNativeContext()) {
result =
- eval_global_.Lookup(source, outer_info, strict_mode, scope_position);
+ eval_global_.Lookup(source, outer_info, language_mode, scope_position);
} else {
DCHECK(scope_position != RelocInfo::kNoPosition);
- result = eval_contextual_.Lookup(source, outer_info, strict_mode,
+ result = eval_contextual_.Lookup(source, outer_info, language_mode,
scope_position);
}
return result;
@@ -336,10 +332,11 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source,
Handle<Context> context,
+ LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
- script_.Put(source, context, function_info);
+ script_.Put(source, context, language_mode, function_info);
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index a7c84b7b5d..1a2608e3aa 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -72,22 +72,21 @@ class CompilationCacheScript : public CompilationSubCache {
public:
CompilationCacheScript(Isolate* isolate, int generations);
- Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset,
+ Handle<SharedFunctionInfo> Lookup(Handle<String> source, Handle<Object> name,
+ int line_offset, int column_offset,
+ bool is_embedder_debug_script,
bool is_shared_cross_origin,
- Handle<Context> context);
+ Handle<Context> context,
+ LanguageMode language_mode);
void Put(Handle<String> source,
Handle<Context> context,
+ LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info);
private:
- bool HasOrigin(Handle<SharedFunctionInfo> function_info,
- Handle<Object> name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin);
+ bool HasOrigin(Handle<SharedFunctionInfo> function_info, Handle<Object> name,
+ int line_offset, int column_offset,
+ bool is_embedder_debug_script, bool is_shared_cross_origin);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
};
@@ -112,7 +111,7 @@ class CompilationCacheEval: public CompilationSubCache {
MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
- StrictMode strict_mode,
+ LanguageMode language_mode,
int scope_position);
void Put(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
@@ -150,14 +149,16 @@ class CompilationCache {
// script for the given source string with the right origin.
MaybeHandle<SharedFunctionInfo> LookupScript(
Handle<String> source, Handle<Object> name, int line_offset,
- int column_offset, bool is_shared_cross_origin, Handle<Context> context);
+ int column_offset, bool is_embedder_debug_script,
+ bool is_shared_cross_origin, Handle<Context> context,
+ LanguageMode language_mode);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
MaybeHandle<SharedFunctionInfo> LookupEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, StrictMode strict_mode, int scope_position);
+ Handle<Context> context, LanguageMode language_mode, int scope_position);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -168,6 +169,7 @@ class CompilationCache {
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source,
Handle<Context> context,
+ LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 3b52aa2d3b..d794ae2b20 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -7,7 +7,6 @@
#include "src/compiler.h"
#include "src/ast-numbering.h"
-#include "src/ast-this-access-visitor.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
@@ -23,6 +22,7 @@
#include "src/liveedit.h"
#include "src/messages.h"
#include "src/parser.h"
+#include "src/prettyprinter.h"
#include "src/rewriter.h"
#include "src/runtime-profiler.h"
#include "src/scanner-character-streams.h"
@@ -35,6 +35,17 @@ namespace v8 {
namespace internal {
+std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
+ if (p.IsUnknown()) {
+ return os << "<?>";
+ } else if (FLAG_hydrogen_track_positions) {
+ return os << "<" << p.inlining_id() << ":" << p.position() << ">";
+ } else {
+ return os << "<0:" << p.raw() << ">";
+ }
+}
+
+
ScriptData::ScriptData(const byte* data, int length)
: owns_data_(false), rejected_(false), data_(data), length_(length) {
if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
@@ -56,25 +67,12 @@ CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false) {
+ aborted_due_to_dependency_change_(false),
+ osr_expr_stack_height_(0) {
Initialize(script->GetIsolate(), BASE, zone);
}
-CompilationInfo::CompilationInfo(Isolate* isolate, Zone* zone)
- : flags_(kThisHasUses),
- script_(Handle<Script>::null()),
- source_stream_(NULL),
- osr_ast_id_(BailoutId::None()),
- parameter_count_(0),
- optimization_id_(-1),
- ast_value_factory_(NULL),
- ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false) {
- Initialize(isolate, STUB, zone);
-}
-
-
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
: flags_(kLazy | kThisHasUses),
@@ -86,7 +84,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false) {
+ aborted_due_to_dependency_change_(false),
+ osr_expr_stack_height_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -103,13 +102,13 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false) {
+ aborted_due_to_dependency_change_(false),
+ osr_expr_stack_height_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
-CompilationInfo::CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate,
- Zone* zone)
+CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
: flags_(kLazy | kThisHasUses),
source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
@@ -117,7 +116,8 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate,
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false) {
+ aborted_due_to_dependency_change_(false),
+ osr_expr_stack_height_(0) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -135,7 +135,8 @@ CompilationInfo::CompilationInfo(
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false) {
+ aborted_due_to_dependency_change_(false),
+ osr_expr_stack_height_(0) {
Initialize(isolate, BASE, zone);
}
@@ -157,6 +158,14 @@ void CompilationInfo::Initialize(Isolate* isolate,
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
+ if (FLAG_hydrogen_track_positions) {
+ inlined_function_infos_ = new List<InlinedFunctionInfo>(5);
+ inlining_id_to_function_id_ = new List<int>(5);
+ } else {
+ inlined_function_infos_ = NULL;
+ inlining_id_to_function_id_ = NULL;
+ }
+
for (int i = 0; i < DependentCode::kGroupCount; i++) {
dependencies_[i] = NULL;
}
@@ -179,13 +188,14 @@ void CompilationInfo::Initialize(Isolate* isolate,
if (isolate_->debug()->is_active()) MarkAsDebug();
if (FLAG_context_specialization) MarkAsContextSpecializing();
if (FLAG_turbo_inlining) MarkAsInliningEnabled();
+ if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
if (FLAG_turbo_types) MarkAsTypingEnabled();
if (!shared_info_.is_null()) {
- DCHECK(strict_mode() == SLOPPY);
- SetStrictMode(shared_info_->strict_mode());
+ DCHECK(is_sloppy(language_mode()));
+ SetLanguageMode(shared_info_->language_mode());
}
- bailout_reason_ = kUnknown;
+ bailout_reason_ = kNoReason;
if (!shared_info().is_null() && shared_info()->is_compiled()) {
// We should initialize the CompilationInfo feedback vector from the
@@ -202,18 +212,30 @@ CompilationInfo::~CompilationInfo() {
}
delete deferred_handles_;
delete no_frame_ranges_;
+ delete inlined_function_infos_;
+ delete inlining_id_to_function_id_;
if (ast_value_factory_owned_) delete ast_value_factory_;
#ifdef DEBUG
// Check that no dependent maps have been added or added dependent maps have
// been rolled back or committed.
for (int i = 0; i < DependentCode::kGroupCount; i++) {
- DCHECK_EQ(NULL, dependencies_[i]);
+ DCHECK(!dependencies_[i]);
}
#endif // DEBUG
}
void CompilationInfo::CommitDependencies(Handle<Code> code) {
+ bool has_dependencies = false;
+ for (int i = 0; i < DependentCode::kGroupCount; i++) {
+ has_dependencies |=
+ dependencies_[i] != NULL && dependencies_[i]->length() > 0;
+ }
+ // Avoid creating a weak cell for code with no dependencies.
+ if (!has_dependencies) return;
+
+ AllowDeferredHandleDereference get_object_wrapper;
+ WeakCell* cell = *Code::WeakCellFor(code);
for (int i = 0; i < DependentCode::kGroupCount; i++) {
ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
if (group_objects == NULL) continue;
@@ -221,9 +243,10 @@ void CompilationInfo::CommitDependencies(Handle<Code> code) {
for (int j = 0; j < group_objects->length(); j++) {
DependentCode::DependencyGroup group =
static_cast<DependentCode::DependencyGroup>(i);
+ Foreign* info = *object_wrapper();
DependentCode* dependent_code =
DependentCode::ForObject(group_objects->at(j), group);
- dependent_code->UpdateToFinishedCode(group, this, *code);
+ dependent_code->UpdateToFinishedCode(group, info, cell);
}
dependencies_[i] = NULL; // Zone-allocated, no need to delete.
}
@@ -231,6 +254,7 @@ void CompilationInfo::CommitDependencies(Handle<Code> code) {
void CompilationInfo::RollbackDependencies() {
+ AllowDeferredHandleDereference get_object_wrapper;
// Unregister from all dependent maps if not yet committed.
for (int i = 0; i < DependentCode::kGroupCount; i++) {
ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
@@ -238,9 +262,10 @@ void CompilationInfo::RollbackDependencies() {
for (int j = 0; j < group_objects->length(); j++) {
DependentCode::DependencyGroup group =
static_cast<DependentCode::DependencyGroup>(i);
+ Foreign* info = *object_wrapper();
DependentCode* dependent_code =
DependentCode::ForObject(group_objects->at(j), group);
- dependent_code->RemoveCompilationInfo(group, this);
+ dependent_code->RemoveCompilationInfo(group, info);
}
dependencies_[i] = NULL; // Zone-allocated, no need to delete.
}
@@ -304,6 +329,66 @@ void CompilationInfo::EnsureFeedbackVector() {
}
+bool CompilationInfo::is_simple_parameter_list() {
+ return scope_->is_simple_parameter_list();
+}
+
+
+int CompilationInfo::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ SourcePosition position) {
+ if (!FLAG_hydrogen_track_positions) {
+ return 0;
+ }
+
+ DCHECK(inlined_function_infos_);
+ DCHECK(inlining_id_to_function_id_);
+ int id = 0;
+ for (; id < inlined_function_infos_->length(); id++) {
+ if (inlined_function_infos_->at(id).shared().is_identical_to(shared)) {
+ break;
+ }
+ }
+ if (id == inlined_function_infos_->length()) {
+ inlined_function_infos_->Add(InlinedFunctionInfo(shared));
+
+ if (!shared->script()->IsUndefined()) {
+ Handle<Script> script(Script::cast(shared->script()));
+ if (!script->source()->IsUndefined()) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
+ << ") id{" << optimization_id() << "," << id << "} ---\n";
+ {
+ DisallowHeapAllocation no_allocation;
+ int start = shared->start_position();
+ int len = shared->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start,
+ len);
+ for (const auto& c : source) {
+ os << AsReversiblyEscapedUC16(c);
+ }
+ }
+
+ os << "\n--- END ---\n";
+ }
+ }
+ }
+
+ int inline_id = inlining_id_to_function_id_->length();
+ inlining_id_to_function_id_->Add(id);
+
+ if (inline_id != 0) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
+ << optimization_id() << "," << id << "} AS " << inline_id << " AT "
+ << position << std::endl;
+ }
+
+ return inline_id;
+}
+
+
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@@ -344,14 +429,8 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
DCHECK(info()->IsOptimizing());
DCHECK(!info()->IsCompilingForDebugging());
- // Optimization could have been disabled by the parser.
- if (info()->shared_info()->optimization_disabled()) {
- return AbortOptimization(
- info()->shared_info()->disable_optimization_reason());
- }
-
- // Do not use crankshaft if we need to be able to set break points.
- if (isolate()->DebuggerHasBreakPoints()) {
+ // Do not use Crankshaft/TurboFan if we need to be able to set break points.
+ if (isolate()->debug()->has_break_points()) {
return RetryOptimization(kDebuggerHasBreakPoints);
}
@@ -418,7 +497,9 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure())
- << " using TurboFan]" << std::endl;
+ << " using TurboFan";
+ if (info()->is_osr()) os << " OSR";
+ os << "]" << std::endl;
}
Timer t(this, &time_taken_to_create_graph_);
compiler::Pipeline pipeline(info());
@@ -431,7 +512,9 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure())
- << " using Crankshaft]" << std::endl;
+ << " using Crankshaft";
+ if (info()->is_osr()) os << " OSR";
+ os << "]" << std::endl;
}
if (FLAG_trace_hydrogen) {
@@ -441,6 +524,13 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());
+ // Optimization could have been disabled by the parser. Note that this check
+ // is only needed because the Hydrogen graph builder is missing some bailouts.
+ if (info()->shared_info()->optimization_disabled()) {
+ return AbortOptimization(
+ info()->shared_info()->disable_optimization_reason());
+ }
+
graph_builder_ = (FLAG_hydrogen_track_positions || FLAG_trace_ic)
? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
@@ -597,42 +687,6 @@ static void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
}
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source the
-// the first character is number 0 (not 1).
-static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script) {
- function_info->set_length(lit->parameter_count());
- function_info->set_formal_parameter_count(lit->parameter_count());
- function_info->set_script(*script);
- function_info->set_function_token_position(lit->function_token_position());
- function_info->set_start_position(lit->start_position());
- function_info->set_end_position(lit->end_position());
- function_info->set_is_expression(lit->is_expression());
- function_info->set_is_anonymous(lit->is_anonymous());
- function_info->set_is_toplevel(is_toplevel);
- function_info->set_inferred_name(*lit->inferred_name());
- function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
- function_info->set_allows_lazy_compilation_without_context(
- lit->AllowsLazyCompilationWithoutContext());
- function_info->set_strict_mode(lit->strict_mode());
- function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
- function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
- function_info->set_ast_node_count(lit->ast_node_count());
- function_info->set_is_function(lit->is_function());
- MaybeDisableOptimization(function_info, lit->dont_optimize_reason());
- function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
- function_info->set_kind(lit->kind());
- function_info->set_uses_super_property(lit->uses_super_property());
- function_info->set_uses_super_constructor_call(
- lit->uses_super_constructor_call());
- function_info->set_asm_function(lit->scope()->asm_function());
-}
-
-
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CompilationInfo* info,
Handle<SharedFunctionInfo> shared) {
@@ -684,10 +738,10 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
PostponeInterruptsScope postpone(info->isolate());
// Parse and update CompilationInfo with the results.
- if (!Parser::Parse(info)) return MaybeHandle<Code>();
+ if (!Parser::ParseStatic(info)) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info->shared_info();
FunctionLiteral* lit = info->function();
- shared->set_strict_mode(lit->strict_mode());
+ shared->set_language_mode(lit->language_mode());
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
MaybeDisableOptimization(shared, lit->dont_optimize_reason());
@@ -699,7 +753,8 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
// Update the shared function info with the scope info. Allocating the
// ScopeInfo object may cause a GC.
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(), info->zone());
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
shared->set_scope_info(*scope_info);
// Update the code and feedback vector for the shared function info.
@@ -760,7 +815,10 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool Renumber(CompilationInfo* info) {
- if (!AstNumbering::Renumber(info->function(), info->zone())) return false;
+ if (!AstNumbering::Renumber(info->isolate(), info->zone(),
+ info->function())) {
+ return false;
+ }
if (!info->shared_info().is_null()) {
FunctionLiteral* lit = info->function();
info->shared_info()->set_ast_node_count(lit->ast_node_count());
@@ -771,89 +829,18 @@ static bool Renumber(CompilationInfo* info) {
}
-static void ThrowSuperConstructorCheckError(CompilationInfo* info,
- Statement* stmt) {
- MaybeHandle<Object> obj = info->isolate()->factory()->NewTypeError(
- "super_constructor_call", HandleVector<Object>(nullptr, 0));
- Handle<Object> exception;
- if (!obj.ToHandle(&exception)) return;
-
- MessageLocation location(info->script(), stmt->position(), stmt->position());
- USE(info->isolate()->Throw(*exception, &location));
-}
-
-
-static bool CheckSuperConstructorCall(CompilationInfo* info) {
- FunctionLiteral* function = info->function();
- if (!function->uses_super_constructor_call()) return true;
-
- if (function->is_default_constructor()) return true;
-
- ZoneList<Statement*>* body = function->body();
- CHECK(body->length() > 0);
-
- int super_call_index = 0;
- // Allow 'use strict' and similiar and empty statements.
- while (true) {
- CHECK(super_call_index < body->length()); // We know there is a super call.
- Statement* stmt = body->at(super_call_index);
- if (stmt->IsExpressionStatement() &&
- stmt->AsExpressionStatement()->expression()->IsLiteral()) {
- super_call_index++;
- continue;
- }
- if (stmt->IsEmptyStatement()) {
- super_call_index++;
- continue;
- }
- break;
- }
-
- Statement* stmt = body->at(super_call_index);
- ExpressionStatement* exprStm = stmt->AsExpressionStatement();
- if (exprStm == nullptr) {
- ThrowSuperConstructorCheckError(info, stmt);
- return false;
- }
- Call* callExpr = exprStm->expression()->AsCall();
- if (callExpr == nullptr) {
- ThrowSuperConstructorCheckError(info, stmt);
- return false;
- }
-
- if (!callExpr->expression()->IsSuperReference()) {
- ThrowSuperConstructorCheckError(info, stmt);
- return false;
- }
-
- ZoneList<Expression*>* arguments = callExpr->arguments();
-
- AstThisAccessVisitor this_access_visitor(info->zone());
- this_access_visitor.VisitExpressions(arguments);
-
- if (this_access_visitor.HasStackOverflow()) return false;
- if (this_access_visitor.UsesThis()) {
- ThrowSuperConstructorCheckError(info, stmt);
- return false;
- }
-
- return true;
-}
-
-
bool Compiler::Analyze(CompilationInfo* info) {
DCHECK(info->function() != NULL);
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
if (!Renumber(info)) return false;
DCHECK(info->scope() != NULL);
- if (!CheckSuperConstructorCall(info)) return false;
return true;
}
bool Compiler::ParseAndAnalyze(CompilationInfo* info) {
- if (!Parser::Parse(info)) return false;
+ if (!Parser::ParseStatic(info)) return false;
return Compiler::Analyze(info);
}
@@ -939,6 +926,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
DCHECK(!isolate->has_pending_exception());
DCHECK(!function->is_compiled());
+ AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
// If the debugger is active, do not compile with turbofan unless we can
// deopt from turbofan code.
if (FLAG_turbo_asm && function->shared()->asm_function() &&
@@ -955,6 +943,9 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
DCHECK(function->shared()->is_compiled());
return info.code();
}
+ // We have failed compilation. If there was an exception clear it so that
+ // we can compile unoptimized code.
+ if (isolate->has_pending_exception()) isolate->clear_pending_exception();
}
if (function->shared()->is_compiled()) {
@@ -966,9 +957,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCodeCommon(&info),
Code);
- if (FLAG_always_opt && isolate->use_crankshaft() &&
- !info.shared_info()->optimization_disabled() &&
- !isolate->DebuggerHasBreakPoints()) {
+ if (FLAG_always_opt && isolate->use_crankshaft()) {
Handle<Code> opt_code;
if (Compiler::GetOptimizedCode(
function, result,
@@ -1038,7 +1027,7 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
// function is inlined before being called for the first time.
if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
Handle<ScopeInfo> target_scope_info =
- ScopeInfo::Create(info->scope(), info->zone());
+ ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
shared->set_scope_info(*target_scope_info);
}
@@ -1095,13 +1084,13 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
VMState<COMPILER> state(info.isolate());
info.MarkAsGlobal();
- if (!Parser::Parse(&info)) return;
+ if (!Parser::ParseStatic(&info)) return;
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (!CompileUnoptimizedCode(&info)) return;
if (!info.shared_info().is_null()) {
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info.scope(),
- info.zone());
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
info.shared_info()->set_scope_info(*scope_info);
}
tracker.RecordRootFunctionInfo(info.code());
@@ -1120,7 +1109,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
isolate->debug()->OnBeforeCompile(script);
- DCHECK(info->is_eval() || info->is_global());
+ DCHECK(info->is_eval() || info->is_global() || info->is_module());
info->MarkAsToplevel();
@@ -1145,7 +1134,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
// data while parsing eagerly is not implemented.
info->SetCachedData(NULL, ScriptCompiler::kNoCompileOptions);
}
- if (!Parser::Parse(info, parse_allow_lazy)) {
+ if (!Parser::ParseStatic(info, parse_allow_lazy)) {
return Handle<SharedFunctionInfo>::null();
}
}
@@ -1170,11 +1159,14 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
DCHECK(!info->code().is_null());
result = isolate->factory()->NewSharedFunctionInfo(
lit->name(), lit->materialized_literal_count(), lit->kind(),
- info->code(), ScopeInfo::Create(info->scope(), info->zone()),
+ info->code(),
+ ScopeInfo::Create(info->isolate(), info->zone(), info->scope()),
info->feedback_vector());
DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- SetFunctionInfo(result, lit, true, script);
+ SharedFunctionInfo::InitFromFunctionLiteral(result, lit);
+ result->set_script(*script);
+ result->set_is_toplevel(true);
Handle<String> script_name = script->name()->IsString()
? Handle<String>(String::cast(script->name()))
@@ -1207,7 +1199,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, StrictMode strict_mode,
+ Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int scope_position) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
@@ -1216,7 +1208,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<SharedFunctionInfo> maybe_shared_info =
- compilation_cache->LookupEval(source, outer_info, context, strict_mode,
+ compilation_cache->LookupEval(source, outer_info, context, language_mode,
scope_position);
Handle<SharedFunctionInfo> shared_info;
@@ -1225,7 +1217,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
CompilationInfoWithZone info(script);
info.MarkAsEval();
if (context->IsNativeContext()) info.MarkAsGlobal();
- info.SetStrictMode(strict_mode);
+ info.SetLanguageMode(language_mode);
info.SetParseRestriction(restriction);
info.SetContext(context);
@@ -1238,10 +1230,13 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
} else {
// Explicitly disable optimization for eval code. We're not yet prepared
// to handle eval-code in the optimizing compiler.
- shared_info->DisableOptimization(kEval);
+ if (restriction != ONLY_SINGLE_FUNCTION_LITERAL) {
+ shared_info->DisableOptimization(kEval);
+ }
// If caller is strict mode, the result must be in strict mode as well.
- DCHECK(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT);
+ DCHECK(is_sloppy(language_mode) ||
+ is_strict(shared_info->language_mode()));
if (!shared_info->dont_cache()) {
compilation_cache->PutEval(source, outer_info, context, shared_info,
scope_position);
@@ -1258,9 +1253,11 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<SharedFunctionInfo> Compiler::CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
- int column_offset, bool is_shared_cross_origin, Handle<Context> context,
+ int column_offset, bool is_embedder_debug_script,
+ bool is_shared_cross_origin, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
+ ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
+ bool is_module) {
Isolate* isolate = source->GetIsolate();
if (compile_options == ScriptCompiler::kNoCompileOptions) {
cached_data = NULL;
@@ -1279,26 +1276,37 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
+ // TODO(rossberg): The natives do not yet obey strong mode rules
+ // (for example, some macros use '==').
+ bool use_strong = FLAG_use_strong && !isolate->bootstrapper()->IsActive();
+ LanguageMode language_mode =
+ construct_language_mode(FLAG_use_strict, use_strong);
+
CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
MaybeHandle<SharedFunctionInfo> maybe_result;
Handle<SharedFunctionInfo> result;
if (extension == NULL) {
- if (FLAG_serialize_toplevel &&
+ // First check per-isolate compilation cache.
+ maybe_result = compilation_cache->LookupScript(
+ source, script_name, line_offset, column_offset,
+ is_embedder_debug_script, is_shared_cross_origin, context,
+ language_mode);
+ if (maybe_result.is_null() && FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kConsumeCodeCache &&
!isolate->debug()->is_loaded()) {
+ // Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
Handle<SharedFunctionInfo> result;
if (CodeSerializer::Deserialize(isolate, *cached_data, source)
.ToHandle(&result)) {
+ // Promote to per-isolate compilation cache.
+ DCHECK(!result->dont_cache());
+ compilation_cache->PutScript(source, context, language_mode, result);
return result;
}
// Deserializer failed. Fall through to compile.
- } else {
- maybe_result = compilation_cache->LookupScript(
- source, script_name, line_offset, column_offset,
- is_shared_cross_origin, context);
}
}
@@ -1322,10 +1330,15 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
script->set_column_offset(Smi::FromInt(column_offset));
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
+ script->set_is_embedder_debug_script(is_embedder_debug_script);
// Compile the function and add it to the cache.
CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
+ if (FLAG_harmony_modules && is_module) {
+ info.MarkAsModule();
+ } else {
+ info.MarkAsGlobal();
+ }
info.SetCachedData(cached_data, compile_options);
info.SetExtension(extension);
info.SetContext(context);
@@ -1333,11 +1346,12 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
compile_options == ScriptCompiler::kProduceCodeCache) {
info.PrepareForSerializing();
}
- if (FLAG_use_strict) info.SetStrictMode(STRICT);
+ info.SetLanguageMode(
+ static_cast<LanguageMode>(info.language_mode() | language_mode));
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
- compilation_cache->PutScript(source, context, result);
+ compilation_cache->PutScript(source, context, language_mode, result);
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache) {
HistogramTimerScope histogram_timer(
@@ -1364,7 +1378,11 @@ Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- if (FLAG_use_strict) info->SetStrictMode(STRICT);
+ LanguageMode language_mode =
+ construct_language_mode(FLAG_use_strict, FLAG_use_strong);
+ info->SetLanguageMode(
+ static_cast<LanguageMode>(info->language_mode() | language_mode));
+
// TODO(marja): FLAG_serialize_toplevel is not honoured and won't be; when the
// real code caching lands, streaming needs to be adapted to use it.
return CompileToplevel(info);
@@ -1378,7 +1396,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
CompilationInfoWithZone info(script);
info.SetFunction(literal);
info.PrepareForCompilation(literal->scope());
- info.SetStrictMode(literal->scope()->strict_mode());
+ info.SetLanguageMode(literal->scope()->language_mode());
if (outer_info->will_serialize()) info.PrepareForSerializing();
Isolate* isolate = info.isolate();
@@ -1422,7 +1440,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
// MakeCode will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.scope(), info.zone());
+ scope_info = ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
} else {
return Handle<SharedFunctionInfo>::null();
}
@@ -1431,7 +1449,11 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
Handle<SharedFunctionInfo> result = factory->NewSharedFunctionInfo(
literal->name(), literal->materialized_literal_count(), literal->kind(),
info.code(), scope_info, info.feedback_vector());
- SetFunctionInfo(result, literal, false, script);
+
+ SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
+ result->set_script(*script);
+ result->set_is_toplevel(false);
+
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(allow_lazy);
result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
@@ -1516,7 +1538,7 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
job->RetryOptimization(kOptimizationDisabled);
} else if (info->HasAbortedDueToDependencyChange()) {
job->RetryOptimization(kBailedOutDueToDependencyChange);
- } else if (isolate->DebuggerHasBreakPoints()) {
+ } else if (isolate->debug()->has_break_points()) {
job->RetryOptimization(kDebuggerHasBreakPoints);
} else if (job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info.get(), shared);
@@ -1553,7 +1575,7 @@ bool Compiler::DebuggerWantsEagerCompilation(CompilationInfo* info,
CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
- : name_(name), info_(info), zone_(info->isolate()) {
+ : name_(name), info_(info) {
if (FLAG_hydrogen_stats) {
info_zone_start_allocation_size_ = info->zone()->allocation_size();
timer_.Start();
@@ -1563,7 +1585,7 @@ CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
CompilationPhase::~CompilationPhase() {
if (FLAG_hydrogen_stats) {
- unsigned size = zone()->allocation_size();
+ size_t size = zone()->allocation_size();
size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
}
@@ -1582,4 +1604,11 @@ bool CompilationPhase::ShouldProduceTraceOutput() const {
base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
}
+
+#if DEBUG
+void CompilationInfo::PrintAstForTesting() {
+ PrintF("--- Source from AST ---\n%s\n",
+ PrettyPrinter(isolate(), zone()).PrintProgram(function()));
+}
+#endif
} } // namespace v8::internal
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 3db32ce0e3..8ef2e0a95b 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -30,6 +30,77 @@ struct OffsetRange {
};
+// This class encapsulates encoding and decoding of sources positions from
+// which hydrogen values originated.
+// When FLAG_track_hydrogen_positions is set this object encodes the
+// identifier of the inlining and absolute offset from the start of the
+// inlined function.
+// When the flag is not set we simply track absolute offset from the
+// script start.
+class SourcePosition {
+ public:
+ SourcePosition(const SourcePosition& other) : value_(other.value_) {}
+
+ static SourcePosition Unknown() { return SourcePosition(kNoPosition); }
+
+ bool IsUnknown() const { return value_ == kNoPosition; }
+
+ uint32_t position() const { return PositionField::decode(value_); }
+ void set_position(uint32_t position) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<uint32_t>(PositionField::update(value_, position));
+ } else {
+ value_ = position;
+ }
+ }
+
+ uint32_t inlining_id() const { return InliningIdField::decode(value_); }
+ void set_inlining_id(uint32_t inlining_id) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ =
+ static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
+ }
+ }
+
+ uint32_t raw() const { return value_; }
+
+ private:
+ static const uint32_t kNoPosition =
+ static_cast<uint32_t>(RelocInfo::kNoPosition);
+ typedef BitField<uint32_t, 0, 9> InliningIdField;
+
+ // Offset from the start of the inlined function.
+ typedef BitField<uint32_t, 9, 23> PositionField;
+
+ explicit SourcePosition(uint32_t value) : value_(value) {}
+
+ friend class HPositionInfo;
+ friend class LCodeGenBase;
+
+ // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
+ // and PositionField.
+ // Otherwise contains absolute offset from the script start.
+ uint32_t value_;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const SourcePosition& p);
+
+
+class InlinedFunctionInfo {
+ public:
+ explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
+ : shared_(shared), start_position_(shared->start_position()) {}
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ int start_position() const { return start_position_; }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ int start_position_;
+};
+
+
class ScriptData {
public:
ScriptData(const byte* data, int length);
@@ -73,27 +144,31 @@ class CompilationInfo {
kEval = 1 << 1,
kGlobal = 1 << 2,
kStrictMode = 1 << 3,
- kThisHasUses = 1 << 4,
- kNative = 1 << 5,
- kDeferredCalling = 1 << 6,
- kNonDeferredCalling = 1 << 7,
- kSavesCallerDoubles = 1 << 8,
- kRequiresFrame = 1 << 9,
- kMustNotHaveEagerFrame = 1 << 10,
- kDeoptimizationSupport = 1 << 11,
- kDebug = 1 << 12,
- kCompilingForDebugging = 1 << 13,
- kParseRestriction = 1 << 14,
- kSerializing = 1 << 15,
- kContextSpecializing = 1 << 16,
- kInliningEnabled = 1 << 17,
- kTypingEnabled = 1 << 18,
- kDisableFutureOptimization = 1 << 19,
- kToplevel = 1 << 20
+ kStrongMode = 1 << 4,
+ kThisHasUses = 1 << 5,
+ kNative = 1 << 6,
+ kDeferredCalling = 1 << 7,
+ kNonDeferredCalling = 1 << 8,
+ kSavesCallerDoubles = 1 << 9,
+ kRequiresFrame = 1 << 10,
+ kMustNotHaveEagerFrame = 1 << 11,
+ kDeoptimizationSupport = 1 << 12,
+ kDebug = 1 << 13,
+ kCompilingForDebugging = 1 << 14,
+ kParseRestriction = 1 << 15,
+ kSerializing = 1 << 16,
+ kContextSpecializing = 1 << 17,
+ kInliningEnabled = 1 << 18,
+ kTypingEnabled = 1 << 19,
+ kDisableFutureOptimization = 1 << 20,
+ kModule = 1 << 21,
+ kToplevel = 1 << 22,
+ kSplittingEnabled = 1 << 23
};
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
- CompilationInfo(Isolate* isolate, Zone* zone);
+ CompilationInfo(Handle<Script> script, Zone* zone);
+ CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
Isolate* isolate() const {
@@ -104,8 +179,10 @@ class CompilationInfo {
bool is_lazy() const { return GetFlag(kLazy); }
bool is_eval() const { return GetFlag(kEval); }
bool is_global() const { return GetFlag(kGlobal); }
- StrictMode strict_mode() const {
- return GetFlag(kStrictMode) ? STRICT : SLOPPY;
+ bool is_module() const { return GetFlag(kModule); }
+ LanguageMode language_mode() const {
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ return construct_language_mode(GetFlag(kStrictMode), GetFlag(kStrongMode));
}
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@@ -115,7 +192,7 @@ class CompilationInfo {
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
void set_script(Handle<Script> script) { script_ = script; }
- HydrogenCodeStub* code_stub() const {return code_stub_; }
+ CodeStub* code_stub() const { return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptData** cached_data() const { return cached_data_; }
ScriptCompiler::CompileOptions compile_options() const {
@@ -145,6 +222,11 @@ class CompilationInfo {
SetFlag(kGlobal);
}
+ void MarkAsModule() {
+ DCHECK(!is_lazy());
+ SetFlag(kModule);
+ }
+
void set_parameter_count(int parameter_count) {
DCHECK(IsStub());
parameter_count_ = parameter_count;
@@ -156,8 +238,10 @@ class CompilationInfo {
bool this_has_uses() { return GetFlag(kThisHasUses); }
- void SetStrictMode(StrictMode strict_mode) {
- SetFlag(kStrictMode, strict_mode == STRICT);
+ void SetLanguageMode(LanguageMode language_mode) {
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ SetFlag(kStrictMode, language_mode & STRICT_BIT);
+ SetFlag(kStrongMode, language_mode & STRONG_BIT);
}
void MarkAsNative() { SetFlag(kNative); }
@@ -214,6 +298,10 @@ class CompilationInfo {
bool is_toplevel() const { return GetFlag(kToplevel); }
+ void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
+
+ bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
+
bool IsCodePreAgingActive() const {
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
!is_debug();
@@ -330,12 +418,15 @@ class CompilationInfo {
}
void AbortOptimization(BailoutReason reason) {
- if (bailout_reason_ != kNoReason) bailout_reason_ = reason;
+ DCHECK(reason != kNoReason);
+ if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
SetFlag(kDisableFutureOptimization);
}
void RetryOptimization(BailoutReason reason) {
- if (bailout_reason_ != kNoReason) bailout_reason_ = reason;
+ DCHECK(reason != kNoReason);
+ if (GetFlag(kDisableFutureOptimization)) return;
+ bailout_reason_ = reason;
}
BailoutReason bailout_reason() const { return bailout_reason_; }
@@ -363,6 +454,15 @@ class CompilationInfo {
return result;
}
+ List<InlinedFunctionInfo>* inlined_function_infos() {
+ return inlined_function_infos_;
+ }
+ List<int>* inlining_id_to_function_id() {
+ return inlining_id_to_function_id_;
+ }
+ int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ SourcePosition position);
+
Handle<Foreign> object_wrapper() {
if (object_wrapper_.is_null()) {
object_wrapper_ =
@@ -394,14 +494,21 @@ class CompilationInfo {
ast_value_factory_owned_ = owned;
}
+ int osr_expr_stack_height() { return osr_expr_stack_height_; }
+ void set_osr_expr_stack_height(int height) {
+ DCHECK(height >= 0);
+ osr_expr_stack_height_ = height;
+ }
+
+#if DEBUG
+ void PrintAstForTesting();
+#endif
+
+ bool is_simple_parameter_list();
+
protected:
- CompilationInfo(Handle<Script> script,
- Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone);
- CompilationInfo(HydrogenCodeStub* stub,
- Isolate* isolate,
- Zone* zone);
CompilationInfo(ScriptCompiler::ExternalSourceStream* source_stream,
ScriptCompiler::StreamedSource::Encoding encoding,
Isolate* isolate, Zone* zone);
@@ -447,7 +554,7 @@ class CompilationInfo {
// The script scope provided as a convenience.
Scope* script_scope_;
// For compiled stubs, the stub object
- HydrogenCodeStub* code_stub_;
+ CodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
@@ -499,6 +606,8 @@ class CompilationInfo {
int prologue_offset_;
List<OffsetRange>* no_frame_ranges_;
+ List<InlinedFunctionInfo>* inlined_function_infos_;
+ List<int>* inlining_id_to_function_id_;
// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
@@ -518,6 +627,8 @@ class CompilationInfo {
// should be abandoned due to dependency change.
bool aborted_due_to_dependency_change_;
+ int osr_expr_stack_height_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -527,21 +638,17 @@ class CompilationInfo {
class CompilationInfoWithZone: public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<Script> script)
- : CompilationInfo(script, &zone_),
- zone_(script->GetIsolate()) {}
+ : CompilationInfo(script, &zone_) {}
explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
- : CompilationInfo(shared_info, &zone_),
- zone_(shared_info->GetIsolate()) {}
+ : CompilationInfo(shared_info, &zone_) {}
explicit CompilationInfoWithZone(Handle<JSFunction> closure)
- : CompilationInfo(closure, &zone_),
- zone_(closure->GetIsolate()) {}
- CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
- : CompilationInfo(stub, isolate, &zone_),
- zone_(isolate) {}
+ : CompilationInfo(closure, &zone_) {}
+ CompilationInfoWithZone(CodeStub* stub, Isolate* isolate)
+ : CompilationInfo(stub, isolate, &zone_) {}
CompilationInfoWithZone(ScriptCompiler::ExternalSourceStream* stream,
ScriptCompiler::StreamedSource::Encoding encoding,
Isolate* isolate)
- : CompilationInfo(stream, encoding, isolate, &zone_), zone_(isolate) {}
+ : CompilationInfo(stream, encoding, isolate, &zone_) {}
// Virtual destructor because a CompilationInfoWithZone has to exit the
// zone scope and get rid of dependent maps even when the destructor is
@@ -693,16 +800,16 @@ class Compiler : public AllStatic {
// Compile a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, StrictMode strict_mode,
+ Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int scope_position);
// Compile a String source within a context.
static Handle<SharedFunctionInfo> CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
- int column_offset, bool is_shared_cross_origin, Handle<Context> context,
- v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options,
- NativesFlag is_natives_code);
+ int column_offset, bool is_debugger_script, bool is_shared_cross_origin,
+ Handle<Context> context, v8::Extension* extension,
+ ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
+ NativesFlag is_natives_code, bool is_module);
static Handle<SharedFunctionInfo> CompileStreamedScript(CompilationInfo* info,
int source_length);
@@ -749,7 +856,7 @@ class CompilationPhase BASE_EMBEDDED {
const char* name_;
CompilationInfo* info_;
Zone zone_;
- unsigned info_zone_start_allocation_size_;
+ size_t info_zone_start_allocation_size_;
base::ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
diff --git a/deps/v8/src/compiler/DEPS b/deps/v8/src/compiler/DEPS
new file mode 100644
index 0000000000..60e2f6d742
--- /dev/null
+++ b/deps/v8/src/compiler/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "-src/v8.h",
+]
diff --git a/deps/v8/src/compiler/STYLE b/deps/v8/src/compiler/STYLE
new file mode 100644
index 0000000000..ae41e3f989
--- /dev/null
+++ b/deps/v8/src/compiler/STYLE
@@ -0,0 +1,29 @@
+Compiler Coding Style
+=====================
+
+Coding style for the TurboFan compiler generally follows the Google C++ Style
+Guide and the Chromium Coding Style. The notes below are usually just extensions
+beyond what the Google style guide already says. If this document doesn't
+mention a rule, follow the Google C++ style.
+
+
+TODOs
+-----
+We use the following convention for putting TODOs into the code:
+
+ * A TODO(turbofan) implies a performance improvement opportunity.
+ * A TODO(name) implies an incomplete implementation.
+
+
+Use of C++11 auto keyword
+-------------------------
+Use auto to avoid type names that are just clutter. Continue to use manifest
+type declarations when it helps readability, and never use auto for anything
+but local variables, in particular auto should only be used where it is obvious
+from context what the type is:
+
+ for (auto block : x->blocks()) // clearly a Block of some kind
+ for (auto instr : x->instructions()) // clearly an Instruction of some kind
+
+ for (auto b : x->predecessors()) // less clear, better to make it explicit
+ for (BasicBlock* b : x->predecessors()) // now clear
diff --git a/deps/v8/src/compiler/all-nodes.cc b/deps/v8/src/compiler/all-nodes.cc
new file mode 100644
index 0000000000..b055a68c08
--- /dev/null
+++ b/deps/v8/src/compiler/all-nodes.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/all-nodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AllNodes::AllNodes(Zone* local_zone, const Graph* graph)
+ : live(local_zone),
+ gray(local_zone),
+ state(graph->NodeCount(), AllNodes::kDead, local_zone) {
+ Node* end = graph->end();
+ state[end->id()] = AllNodes::kLive;
+ live.push_back(end);
+ // Find all live nodes reachable from end.
+ for (size_t i = 0; i < live.size(); i++) {
+ for (Node* const input : live[i]->inputs()) {
+ if (input == nullptr) {
+ // TODO(titzer): print a warning.
+ continue;
+ }
+ if (input->id() >= graph->NodeCount()) {
+ // TODO(titzer): print a warning.
+ continue;
+ }
+ if (state[input->id()] != AllNodes::kLive) {
+ live.push_back(input);
+ state[input->id()] = AllNodes::kLive;
+ }
+ }
+ }
+
+ // Find all nodes that are not reachable from end that use live nodes.
+ for (size_t i = 0; i < live.size(); i++) {
+ for (Node* const use : live[i]->uses()) {
+ if (state[use->id()] == AllNodes::kDead) {
+ gray.push_back(use);
+ state[use->id()] = AllNodes::kGray;
+ }
+ }
+ }
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/all-nodes.h b/deps/v8/src/compiler/all-nodes.h
new file mode 100644
index 0000000000..e6a83ef623
--- /dev/null
+++ b/deps/v8/src/compiler/all-nodes.h
@@ -0,0 +1,41 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ALL_NODES_H_
+#define V8_COMPILER_ALL_NODES_H_
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper utility that traverses the graph and gathers all nodes reachable
+// from end.
+class AllNodes {
+ public:
+ // Constructor. Traverses the graph and builds the {live} and {gray} sets.
+ AllNodes(Zone* local_zone, const Graph* graph);
+
+ bool IsLive(Node* node) {
+ return node != nullptr && node->id() < static_cast<int>(state.size()) &&
+ state[node->id()] == kLive;
+ }
+
+ NodeVector live; // Nodes reachable from end.
+ NodeVector gray; // Nodes themselves not reachable from end, but that
+ // appear in use lists of live nodes.
+
+ private:
+ enum State { kDead, kGray, kLive };
+
+ ZoneVector<State> state;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_ALL_NODES_H_
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index cfa4de9b3c..1ff7ea3acc 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -8,7 +8,6 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
namespace v8 {
@@ -199,6 +198,41 @@ class OutOfLineLoadInteger FINAL : public OutOfLineCode {
Register const result_;
};
+
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kOverflow:
+ return vs;
+ case kNotOverflow:
+ return vc;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
} // namespace
@@ -302,6 +336,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArchNop:
// don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -458,8 +500,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVcmpF64:
- __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
+ } else {
+ DCHECK(instr->InputAt(1)->IsImmediate());
+ // 0.0 is the only immediate supported by vcmp instructions.
+ DCHECK(i.InputDouble(1) == 0.0);
+ __ VFPCompareAndSetFlags(i.InputFloat64Register(0), i.InputDouble(1));
+ }
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF64:
@@ -685,65 +734,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArmOperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- switch (branch->condition) {
- case kUnorderedEqual:
- // The "eq" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kEqual:
- __ b(eq, tlabel);
- break;
- case kUnorderedNotEqual:
- // Unordered or not equal can be tested with "ne" condtion.
- // See ARMv7 manual A8.3 - Conditional execution.
- case kNotEqual:
- __ b(ne, tlabel);
- break;
- case kSignedLessThan:
- __ b(lt, tlabel);
- break;
- case kSignedGreaterThanOrEqual:
- __ b(ge, tlabel);
- break;
- case kSignedLessThanOrEqual:
- __ b(le, tlabel);
- break;
- case kSignedGreaterThan:
- __ b(gt, tlabel);
- break;
- case kUnorderedLessThan:
- // The "lo" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThan:
- __ b(lo, tlabel);
- break;
- case kUnorderedGreaterThanOrEqual:
- // Unordered, greater than or equal can be tested with "hs" condtion.
- // See ARMv7 manual A8.3 - Conditional execution.
- case kUnsignedGreaterThanOrEqual:
- __ b(hs, tlabel);
- break;
- case kUnorderedLessThanOrEqual:
- // The "ls" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThanOrEqual:
- __ b(ls, tlabel);
- break;
- case kUnorderedGreaterThan:
- // Unordered or greater than can be tested with "hi" condtion.
- // See ARMv7 manual A8.3 - Conditional execution.
- case kUnsignedGreaterThan:
- __ b(hi, tlabel);
- break;
- case kOverflow:
- __ b(vs, tlabel);
- break;
- case kNotOverflow:
- __ b(vc, tlabel);
- break;
- }
+ Condition cc = FlagsConditionToCondition(branch->condition);
+ __ b(cc, tlabel);
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
@@ -757,86 +749,39 @@ void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
ArmOperandConverter i(this, instr);
- Label done;
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
- Label check;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = kNoCondition;
- switch (condition) {
- case kUnorderedEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(0));
- __ b(&done);
- // Fall through.
- case kEqual:
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(1));
- __ b(&done);
- // Fall through.
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnorderedLessThan:
- __ b(vc, &check);
- __ mov(reg, Operand(0));
- __ b(&done);
- // Fall through.
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(1));
- __ b(&done);
- // Fall through.
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnorderedLessThanOrEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(0));
- __ b(&done);
- // Fall through.
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnorderedGreaterThan:
- __ b(vc, &check);
- __ mov(reg, Operand(1));
- __ b(&done);
- // Fall through.
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- case kOverflow:
- cc = vs;
- break;
- case kNotOverflow:
- cc = vc;
- break;
- }
- __ bind(&check);
+ Condition cc = FlagsConditionToCondition(condition);
__ mov(reg, Operand(0));
__ mov(reg, Operand(1), LeaveCC, cc);
- __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmp(input, Operand(i.InputInt32(index + 0)));
+ __ b(eq, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ __ cmp(input, Operand(case_count));
+ __ BlockConstPoolFor(case_count + 2);
+ __ ldr(pc, MemOperand(pc, input, LSL, 2), lo);
+ __ b(GetLabel(i.InputRpo(1)));
+ for (size_t index = 0; index < case_count; ++index) {
+ __ dd(GetLabel(i.InputRpo(index + 2)));
+ }
}
@@ -849,6 +794,7 @@ void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
bool saved_pp;
if (FLAG_enable_ool_constant_pool) {
@@ -877,12 +823,26 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ } else if (stack_slots > 0) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
- int stack_slots = frame()->GetSpillSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
if (stack_slots > 0) {
__ sub(sp, sp, Operand(stack_slots * kPointerSize));
}
@@ -891,10 +851,10 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ add(sp, sp, Operand(stack_slots * kPointerSize));
}
@@ -906,13 +866,15 @@ void CodeGenerator::AssembleReturn() {
}
__ LeaveFrame(StackFrame::MANUAL);
__ Ret();
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ LeaveFrame(StackFrame::MANUAL);
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ Drop(pop_count);
__ Ret();
+ } else {
+ __ Ret();
}
}
@@ -1082,6 +1044,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 32-bit ARM we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// On 32-bit ARM we do not insert nops for inlined Smi code.
}
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index ef9e89ed4e..47511a1ebf 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -5,6 +5,7 @@
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -98,8 +99,8 @@ template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
AddressingMode kImmMode, AddressingMode kRegMode>
bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
- InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
ArmOperandGenerator g(selector);
if (node->opcode() == kOpcode) {
Int32BinopMatcher m(node);
@@ -118,8 +119,8 @@ bool TryMatchShift(InstructionSelector* selector,
bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -127,8 +128,8 @@ bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -136,8 +137,8 @@ bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -145,8 +146,8 @@ bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -155,8 +156,8 @@ bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
- InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return (
TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
@@ -168,7 +169,7 @@ bool TryMatchShift(InstructionSelector* selector,
bool TryMatchImmediateOrShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
size_t* input_count_return,
- InstructionOperand** inputs) {
+ InstructionOperand* inputs) {
ArmOperandGenerator g(selector);
if (g.CanBeImmediate(node, *opcode_return)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
@@ -189,9 +190,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[5];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
if (m.left().node() == m.right().node()) {
@@ -202,7 +203,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
// mov r0, r1, asr #16
// adds r0, r0, r1, asr #16
// bvs label
- InstructionOperand* const input = g.UseRegister(m.left().node());
+ InstructionOperand const input = g.UseRegister(m.left().node());
opcode |= AddressingModeField::encode(kMode_Operand2_R);
inputs[input_count++] = input;
inputs[input_count++] = input;
@@ -232,8 +233,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
@@ -308,10 +309,9 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
- Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
- g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
- temps);
+ InstructionOperand temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
+ Emit(kArmStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r4),
+ g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps), temps);
return;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
@@ -341,10 +341,10 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
}
}
@@ -378,10 +378,10 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand);
@@ -416,11 +416,11 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), nullptr,
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length);
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
offset_operand, length_operand, g.UseRegister(value),
g.UseRegister(buffer), offset_operand);
}
@@ -432,8 +432,8 @@ void EmitBic(InstructionSelector* selector, Node* node, Node* left,
Node* right) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmBic;
- InstructionOperand* value_operand;
- InstructionOperand* shift_operand;
+ InstructionOperand value_operand;
+ InstructionOperand shift_operand;
if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
value_operand, shift_operand);
@@ -447,8 +447,8 @@ void EmitBic(InstructionSelector* selector, Node* node, Node* left,
void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
uint32_t lsb, uint32_t width) {
- DCHECK_LE(1, width);
- DCHECK_LE(width, 32 - lsb);
+ DCHECK_LE(1u, width);
+ DCHECK_LE(width, 32u - lsb);
ArmOperandGenerator g(selector);
selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
g.TempImmediate(lsb), g.TempImmediate(width));
@@ -480,7 +480,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t msb = base::bits::CountLeadingZeros32(value);
// Try to interpret this AND as UBFX.
if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
- DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
@@ -534,8 +534,8 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(-1)) {
InstructionCode opcode = kArmMvn;
- InstructionOperand* value_operand;
- InstructionOperand* shift_operand;
+ InstructionOperand value_operand;
+ InstructionOperand shift_operand;
if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
&shift_operand)) {
Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
@@ -549,15 +549,16 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
}
+namespace {
+
template <typename TryMatchShift>
-static inline void VisitShift(InstructionSelector* selector, Node* node,
- TryMatchShift try_match_shift,
- FlagsContinuation* cont) {
+void VisitShift(InstructionSelector* selector, Node* node,
+ TryMatchShift try_match_shift, FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmMov;
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 2;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
@@ -572,8 +573,8 @@ static inline void VisitShift(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
@@ -585,12 +586,14 @@ static inline void VisitShift(InstructionSelector* selector, Node* node,
template <typename TryMatchShift>
-static inline void VisitShift(InstructionSelector* selector, Node* node,
+void VisitShift(InstructionSelector* selector, Node* node,
TryMatchShift try_match_shift) {
FlagsContinuation cont;
VisitShift(selector, node, try_match_shift, &cont);
}
+} // namespace
+
void InstructionSelector::VisitWord32Shl(Node* node) {
VisitShift(this, node, TryMatchLSL);
@@ -602,7 +605,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (IsSupported(ARMv7) && m.left().IsWord32And() &&
m.right().IsInRange(0, 31)) {
- int32_t lsb = m.right().Value();
+ uint32_t lsb = m.right().Value();
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t value = (mleft.right().Value() >> lsb) << lsb;
@@ -805,26 +808,26 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
void InstructionSelector::VisitUint32MulHigh(Node* node) {
ArmOperandGenerator g(this);
- InstructionOperand* outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
- InstructionOperand* inputs[] = {g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1))};
+ InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1))};
Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
- InstructionOperand* result_operand,
- InstructionOperand* left_operand,
- InstructionOperand* right_operand) {
+ InstructionOperand result_operand,
+ InstructionOperand left_operand,
+ InstructionOperand right_operand) {
ArmOperandGenerator g(selector);
if (selector->IsSupported(SUDIV)) {
selector->Emit(div_opcode, result_operand, left_operand, right_operand);
return;
}
- InstructionOperand* left_double_operand = g.TempDoubleRegister();
- InstructionOperand* right_double_operand = g.TempDoubleRegister();
- InstructionOperand* result_double_operand = g.TempDoubleRegister();
+ InstructionOperand left_double_operand = g.TempDoubleRegister();
+ InstructionOperand right_double_operand = g.TempDoubleRegister();
+ InstructionOperand result_double_operand = g.TempDoubleRegister();
selector->Emit(f64i32_opcode, left_double_operand, left_operand);
selector->Emit(f64i32_opcode, right_double_operand, right_operand);
selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
@@ -859,10 +862,10 @@ static void VisitMod(InstructionSelector* selector, Node* node,
ArchOpcode i32f64_opcode) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* div_operand = g.TempRegister();
- InstructionOperand* result_operand = g.DefineAsRegister(node);
- InstructionOperand* left_operand = g.UseRegister(m.left().node());
- InstructionOperand* right_operand = g.UseRegister(m.right().node());
+ InstructionOperand div_operand = g.TempRegister();
+ InstructionOperand result_operand = g.DefineAsRegister(node);
+ InstructionOperand left_operand = g.UseRegister(m.left().node());
+ InstructionOperand right_operand = g.UseRegister(m.right().node());
EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
left_operand, right_operand);
if (selector->IsSupported(MLS)) {
@@ -870,7 +873,7 @@ static void VisitMod(InstructionSelector* selector, Node* node,
left_operand);
return;
}
- InstructionOperand* mul_operand = g.TempRegister();
+ InstructionOperand mul_operand = g.TempRegister();
selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
}
@@ -1035,9 +1038,9 @@ void InstructionSelector::VisitCall(Node* node) {
// TODO(dcarney): might be possible to use claim/poke instead
// Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kArmPush, NULL, g.UseRegister(*input));
+ for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
+ ++i) {
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(*i));
}
// Select the appropriate opcode based on the call type.
@@ -1057,7 +1060,7 @@ void InstructionSelector::VisitCall(Node* node) {
opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
- InstructionOperand** first_output =
+ InstructionOperand* first_output =
buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), first_output,
@@ -1073,16 +1076,18 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Float64BinopMatcher m(node);
+ InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArmVcmpF64), nullptr,
- g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()), g.Label(cont->true_block()),
+ selector->Emit(cont->Encode(kArmVcmpF64), g.NoOutput(),
+ g.UseRegister(m.left().node()), rhs,
+ g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
DCHECK(cont->IsSet());
- selector->Emit(
- cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
- g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+ selector->Emit(cont->Encode(kArmVcmpF64),
+ g.DefineAsRegister(cont->result()),
+ g.UseRegister(m.left().node()), rhs);
}
}
@@ -1092,9 +1097,9 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[5];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
size_t output_count = 0;
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
@@ -1120,7 +1125,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
@@ -1167,25 +1172,25 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
- Node* const result = node->FindProjection(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (!result || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -1228,9 +1233,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
ArmOperandGenerator g(selector);
InstructionCode const opcode =
cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, value_operand,
+ selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
@@ -1249,6 +1254,67 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ ArmOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+ InstructionOperand default_operand = g.Label(default_branch);
+
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+ // is 2^31-1, so don't assume that it's non-zero below.
+ size_t value_range =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+ // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+ // instruction.
+ size_t table_space_cost = 4 + value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * case_count;
+ size_t lookup_time_cost = case_count;
+ if (case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
+ index_operand, value_operand, g.TempImmediate(min_value));
+ }
+ size_t input_count = 2 + value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < case_count; ++index) {
+ size_t value = case_values[index] - min_value;
+ BasicBlock* branch = case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+ return;
+ }
+
+ // Generate a sequence of conditional jumps.
+ size_t input_count = 2 + case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = default_operand;
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t value = case_values[index];
+ BasicBlock* branch = case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -1284,7 +1350,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
}
@@ -1294,7 +1360,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
}
@@ -1304,19 +1370,19 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/arm/linkage-arm.cc b/deps/v8/src/compiler/arm/linkage-arm.cc
index 3fca76ff54..57590d3e5b 100644
--- a/deps/v8/src/compiler/arm/linkage-arm.cc
+++ b/deps/v8/src/compiler/arm/linkage-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
@@ -35,30 +33,32 @@ struct ArmLinkageHelperTraits {
typedef LinkageHelper<ArmLinkageHelperTraits> LH;
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
+ const MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index e025236059..89c2ffb6f8 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -8,7 +8,6 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
namespace v8 {
@@ -215,6 +214,41 @@ class OutOfLineLoadZero FINAL : public OutOfLineCode {
Register const result_;
};
+
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kOverflow:
+ return vs;
+ case kNotOverflow:
+ return vc;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ }
+ UNREACHABLE();
+ return nv;
+}
+
} // namespace
@@ -323,6 +357,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
@@ -588,7 +628,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Tst(i.InputRegister32(0), i.InputOperand32(1));
break;
case kArm64Float64Cmp:
- __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ DCHECK(instr->InputAt(1)->IsImmediate());
+ // 0.0 is the only immediate supported by fcmp instructions.
+ DCHECK(i.InputDouble(1) == 0.0);
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
+ }
break;
case kArm64Float64Add:
__ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -684,7 +731,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
- __ Add(index, object, Operand(index, SXTW));
+ __ Add(index, object, index);
__ Str(value, MemOperand(index));
SaveFPRegsMode mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
@@ -785,65 +832,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else {
- switch (condition) {
- case kUnorderedEqual:
- // The "eq" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kEqual:
- __ B(eq, tlabel);
- break;
- case kUnorderedNotEqual:
- // Unordered or not equal can be tested with "ne" condtion.
- // See ARMv8 manual C1.2.3 - Condition Code.
- case kNotEqual:
- __ B(ne, tlabel);
- break;
- case kSignedLessThan:
- __ B(lt, tlabel);
- break;
- case kSignedGreaterThanOrEqual:
- __ B(ge, tlabel);
- break;
- case kSignedLessThanOrEqual:
- __ B(le, tlabel);
- break;
- case kSignedGreaterThan:
- __ B(gt, tlabel);
- break;
- case kUnorderedLessThan:
- // The "lo" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThan:
- __ B(lo, tlabel);
- break;
- case kUnorderedGreaterThanOrEqual:
- // Unordered, greater than or equal can be tested with "hs" condtion.
- // See ARMv8 manual C1.2.3 - Condition Code.
- case kUnsignedGreaterThanOrEqual:
- __ B(hs, tlabel);
- break;
- case kUnorderedLessThanOrEqual:
- // The "ls" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThanOrEqual:
- __ B(ls, tlabel);
- break;
- case kUnorderedGreaterThan:
- // Unordered or greater than can be tested with "hi" condtion.
- // See ARMv8 manual C1.2.3 - Condition Code.
- case kUnsignedGreaterThan:
- __ B(hi, tlabel);
- break;
- case kOverflow:
- __ B(vs, tlabel);
- break;
- case kNotOverflow:
- __ B(vc, tlabel);
- break;
- }
+ Condition cc = FlagsConditionToCondition(condition);
+ __ B(cc, tlabel);
}
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
@@ -858,85 +848,45 @@ void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
Arm64OperandConverter i(this, instr);
- Label done;
// Materialize a full 64-bit 1 or 0 value. The result register is always the
// last output of the instruction.
- Label check;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = nv;
- switch (condition) {
- case kUnorderedEqual:
- __ B(vc, &check);
- __ Mov(reg, 0);
- __ B(&done);
- // Fall through.
- case kEqual:
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ B(vc, &check);
- __ Mov(reg, 1);
- __ B(&done);
- // Fall through.
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnorderedLessThan:
- __ B(vc, &check);
- __ Mov(reg, 0);
- __ B(&done);
- // Fall through.
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ B(vc, &check);
- __ Mov(reg, 1);
- __ B(&done);
- // Fall through.
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnorderedLessThanOrEqual:
- __ B(vc, &check);
- __ Mov(reg, 0);
- __ B(&done);
- // Fall through.
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnorderedGreaterThan:
- __ B(vc, &check);
- __ Mov(reg, 1);
- __ B(&done);
- // Fall through.
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- case kOverflow:
- cc = vs;
- break;
- case kNotOverflow:
- cc = vc;
- break;
- }
- __ Bind(&check);
+ Condition cc = FlagsConditionToCondition(condition);
__ Cset(reg, cc);
- __ Bind(&done);
+}
+
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+ Register input = i.InputRegister32(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ Cmp(input, i.InputInt32(index + 0));
+ __ B(eq, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+ UseScratchRegisterScope scope(masm());
+ Register input = i.InputRegister32(0);
+ Register temp = scope.AcquireX();
+ size_t const case_count = instr->InputCount() - 2;
+ Label table;
+ __ Cmp(input, case_count);
+ __ B(hs, GetLabel(i.InputRpo(1)));
+ __ Adr(temp, &table);
+ __ Add(temp, temp, Operand(input, UXTW, 2));
+ __ Br(temp);
+ __ StartBlockPools();
+ __ Bind(&table);
+ for (size_t index = 0; index < case_count; ++index) {
+ __ B(GetLabel(i.InputRpo(index + 2)));
+ }
+ __ EndBlockPools();
}
@@ -956,6 +906,7 @@ static int AlignedStackSlots(int stack_slots) {
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ SetStackPointer(csp);
__ Push(lr, fp);
@@ -969,13 +920,27 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ } else if (stack_slots > 0) {
__ SetStackPointer(jssp);
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
- int stack_slots = frame()->GetSpillSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
if (stack_slots > 0) {
Register sp = __ StackPointer();
if (!sp.Is(csp)) {
@@ -988,10 +953,10 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
}
@@ -1002,7 +967,7 @@ void CodeGenerator::AssembleReturn() {
__ Mov(csp, fp);
__ Pop(fp, lr);
__ Ret();
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ Mov(jssp, fp);
__ Pop(fp, lr);
int pop_count = descriptor->IsJSFunctionCall()
@@ -1010,6 +975,8 @@ void CodeGenerator::AssembleReturn() {
: 0;
__ Drop(pop_count);
__ Ret();
+ } else {
+ __ Ret();
}
}
@@ -1157,6 +1124,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit ARM we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 72661af4c3..6afd3e8c12 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -4,6 +4,7 @@
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -29,7 +30,7 @@ class Arm64OperandGenerator FINAL : public OperandGenerator {
explicit Arm64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
+ InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
if (CanBeImmediate(node, mode)) {
return UseImmediate(node);
}
@@ -173,9 +174,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
bool try_ror_operand = true;
@@ -214,8 +215,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
@@ -312,8 +313,8 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
- Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
+ InstructionOperand temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
+ Emit(kArm64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, x10),
g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
temps);
return;
@@ -353,10 +354,10 @@ void InstructionSelector::VisitStore(Node* node) {
return;
}
if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
}
}
@@ -423,7 +424,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
UNREACHABLE();
return;
}
- Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
}
@@ -506,7 +507,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
// The mask must be contiguous, and occupy the least-significant bits.
- DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask));
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
@@ -543,7 +544,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
// The mask must be contiguous, and occupy the least-significant bits.
- DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask));
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
@@ -627,7 +628,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
- int32_t lsb = m.right().Value();
+ uint32_t lsb = m.right().Value();
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
@@ -652,7 +653,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
- int64_t lsb = m.right().Value();
+ uint64_t lsb = m.right().Value();
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
@@ -863,7 +864,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
void InstructionSelector::VisitInt32MulHigh(Node* node) {
// TODO(arm64): Can we do better here?
Arm64OperandGenerator g(this);
- InstructionOperand* const smull_operand = g.TempRegister();
+ InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
@@ -873,7 +874,7 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
void InstructionSelector::VisitUint32MulHigh(Node* node) {
// TODO(arm64): Can we do better here?
Arm64OperandGenerator g(this);
- InstructionOperand* const smull_operand = g.TempRegister();
+ InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
@@ -1106,7 +1107,7 @@ void InstructionSelector::VisitCall(Node* node) {
if (aligned_push_count > 0) {
// TODO(dcarney): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
- Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+ Emit(kArm64Claim | MiscField::encode(aligned_push_count), g.NoOutput());
}
// Move arguments to the stack.
{
@@ -1114,12 +1115,13 @@ void InstructionSelector::VisitCall(Node* node) {
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
- Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
+ Emit(kArm64Poke | MiscField::encode(slot), g.NoOutput(),
+ g.UseRegister(input));
slot--;
}
// Now all pushes can be done in pairs.
for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair | MiscField::encode(slot), NULL,
+ Emit(kArm64PokePair | MiscField::encode(slot), g.NoOutput(),
g.UseRegister(buffer.pushed_nodes[slot]),
g.UseRegister(buffer.pushed_nodes[slot - 1]));
}
@@ -1142,7 +1144,7 @@ void InstructionSelector::VisitCall(Node* node) {
opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
- InstructionOperand** first_output =
+ InstructionOperand* first_output =
buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), first_output,
@@ -1153,12 +1155,13 @@ void InstructionSelector::VisitCall(Node* node) {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
DCHECK(cont->IsSet());
@@ -1220,10 +1223,14 @@ static void VisitWord64Test(InstructionSelector* selector, Node* node,
static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(left),
- g.UseRegister(right), cont);
+ Float64BinopMatcher m(node);
+ if (m.right().Is(0.0)) {
+ VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()), cont);
+ } else {
+ VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), cont);
+ }
}
@@ -1295,25 +1302,25 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
kArithmeticImm);
case IrOpcode::kFloat64Equal:
- cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont.OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -1343,7 +1350,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
- Emit(cont.Encode(kArm64TestAndBranch32), NULL,
+ Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros32(m.right().Value())),
@@ -1361,7 +1368,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
- Emit(cont.Encode(kArm64TestAndBranch), NULL,
+ Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros64(m.right().Value())),
@@ -1378,12 +1385,73 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
// Branch could not be combined with a compare, compare against 0 and branch.
- Emit(cont.Encode(kArm64CompareAndBranch32), NULL, g.UseRegister(value),
- g.Label(cont.true_block()),
+ Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(value), g.Label(cont.true_block()),
g.Label(cont.false_block()))->MarkAsControl();
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ Arm64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+ InstructionOperand default_operand = g.Label(default_branch);
+
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+ // is 2^31-1, so don't assume that it's non-zero below.
+ size_t value_range =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+ // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+ // instruction.
+ size_t table_space_cost = 4 + value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * case_count;
+ size_t lookup_time_cost = case_count;
+ if (case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArm64Sub32, index_operand, value_operand,
+ g.TempImmediate(min_value));
+ }
+ size_t input_count = 2 + value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < case_count; ++index) {
+ size_t value = case_values[index] - min_value;
+ BasicBlock* branch = case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+ return;
+ }
+
+ // Generate a sequence of conditional jumps.
+ size_t input_count = 2 + case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = default_operand;
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t value = case_values[index];
+ BasicBlock* branch = case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* const user = node;
FlagsContinuation cont(kEqual, node);
@@ -1457,7 +1525,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
kArithmeticImm, &cont);
@@ -1468,7 +1536,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
kArithmeticImm, &cont);
@@ -1497,19 +1565,19 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
@@ -1525,6 +1593,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/linkage-arm64.cc b/deps/v8/src/compiler/arm64/linkage-arm64.cc
index 291b5520f9..57945fd77f 100644
--- a/deps/v8/src/compiler/arm64/linkage-arm64.cc
+++ b/deps/v8/src/compiler/arm64/linkage-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
@@ -35,30 +33,32 @@ struct Arm64LinkageHelperTraits {
typedef LinkageHelper<Arm64LinkageHelperTraits> LH;
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
+ const MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index cde5e7182d..4b1fa1c85a 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -7,10 +7,11 @@
#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
#include "src/full-codegen.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -19,23 +20,377 @@ namespace v8 {
namespace internal {
namespace compiler {
+
+// Each expression in the AST is evaluated in a specific context. This context
+// decides how the evaluation result is passed up the visitor.
+class AstGraphBuilder::AstContext BASE_EMBEDDED {
+ public:
+ bool IsEffect() const { return kind_ == Expression::kEffect; }
+ bool IsValue() const { return kind_ == Expression::kValue; }
+ bool IsTest() const { return kind_ == Expression::kTest; }
+
+ // Determines how to combine the frame state with the value
+ // that is about to be plugged into this AstContext.
+ OutputFrameStateCombine GetStateCombine() {
+ return IsEffect() ? OutputFrameStateCombine::Ignore()
+ : OutputFrameStateCombine::Push();
+ }
+
+ // Plug a node into this expression context. Call this function in tail
+ // position in the Visit functions for expressions.
+ virtual void ProduceValue(Node* value) = 0;
+
+ // Unplugs a node from this expression context. Call this to retrieve the
+ // result of another Visit function that already plugged the context.
+ virtual Node* ConsumeValue() = 0;
+
+ // Shortcut for "context->ProduceValue(context->ConsumeValue())".
+ void ReplaceValue() { ProduceValue(ConsumeValue()); }
+
+ protected:
+ AstContext(AstGraphBuilder* owner, Expression::Context kind);
+ virtual ~AstContext();
+
+ AstGraphBuilder* owner() const { return owner_; }
+ Environment* environment() const { return owner_->environment(); }
+
+// We want to be able to assert, in a context-specific way, that the stack
+// height makes sense when the context is filled.
+#ifdef DEBUG
+ int original_height_;
+#endif
+
+ private:
+ Expression::Context kind_;
+ AstGraphBuilder* owner_;
+ AstContext* outer_;
+};
+
+
+// Context to evaluate expression for its side effects only.
+class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
+ public:
+ explicit AstEffectContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kEffect) {}
+ ~AstEffectContext() FINAL;
+ void ProduceValue(Node* value) FINAL;
+ Node* ConsumeValue() FINAL;
+};
+
+
+// Context to evaluate expression for its value (and side effects).
+class AstGraphBuilder::AstValueContext FINAL : public AstContext {
+ public:
+ explicit AstValueContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kValue) {}
+ ~AstValueContext() FINAL;
+ void ProduceValue(Node* value) FINAL;
+ Node* ConsumeValue() FINAL;
+};
+
+
+// Context to evaluate expression for a condition value (and side effects).
+class AstGraphBuilder::AstTestContext FINAL : public AstContext {
+ public:
+ explicit AstTestContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kTest) {}
+ ~AstTestContext() FINAL;
+ void ProduceValue(Node* value) FINAL;
+ Node* ConsumeValue() FINAL;
+};
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body and allows to
+// change the current {scope} and {context} during visitation.
+class AstGraphBuilder::ContextScope BASE_EMBEDDED {
+ public:
+ ContextScope(AstGraphBuilder* builder, Scope* scope, Node* context)
+ : builder_(builder),
+ outer_(builder->execution_context()),
+ scope_(scope),
+ depth_(builder_->environment()->ContextStackDepth()) {
+ builder_->environment()->PushContext(context); // Push.
+ builder_->set_execution_context(this);
+ }
+
+ ~ContextScope() {
+ builder_->set_execution_context(outer_); // Pop.
+ builder_->environment()->PopContext();
+ CHECK_EQ(depth_, builder_->environment()->ContextStackDepth());
+ }
+
+ // Current scope during visitation.
+ Scope* scope() const { return scope_; }
+
+ private:
+ AstGraphBuilder* builder_;
+ ContextScope* outer_;
+ Scope* scope_;
+ int depth_;
+};
+
+
+// Scoped class tracking control statements entered by the visitor. There are
+// different types of statements participating in this stack to properly track
+// local as well as non-local control flow:
+// - IterationStatement : Allows proper 'break' and 'continue' behavior.
+// - BreakableStatement : Allows 'break' from block and switch statements.
+// - TryCatchStatement : Intercepts 'throw' and implicit exceptional edges.
+// - TryFinallyStatement: Intercepts 'break', 'continue', 'throw' and 'return'.
+class AstGraphBuilder::ControlScope BASE_EMBEDDED {
+ public:
+ ControlScope(AstGraphBuilder* builder, int stack_delta)
+ : builder_(builder),
+ outer_(builder->execution_control()),
+ stack_delta_(stack_delta) {
+ builder_->set_execution_control(this); // Push.
+ }
+
+ virtual ~ControlScope() {
+ builder_->set_execution_control(outer_); // Pop.
+ }
+
+ // Either 'break' or 'continue' to the target statement.
+ void BreakTo(BreakableStatement* target);
+ void ContinueTo(BreakableStatement* target);
+
+ // Either 'return' or 'throw' the given value.
+ void ReturnValue(Node* return_value);
+ void ThrowValue(Node* exception_value);
+
+ class DeferredCommands;
+
+ protected:
+ enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_THROW };
+
+ // Performs one of the above commands on this stack of control scopes. This
+ // walks through the stack giving each scope a chance to execute or defer the
+ // given command by overriding the {Execute} method appropriately. Note that
+ // this also drops extra operands from the environment for each skipped scope.
+ void PerformCommand(Command cmd, Statement* target, Node* value);
+
+ // Interface to execute a given command in this scope. Returning {true} here
+ // indicates successful execution whereas {false} requests to skip scope.
+ virtual bool Execute(Command cmd, Statement* target, Node* value) {
+ // For function-level control.
+ switch (cmd) {
+ case CMD_THROW:
+ builder()->BuildThrow(value);
+ return true;
+ case CMD_RETURN:
+ builder()->BuildReturn(value);
+ return true;
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ break;
+ }
+ return false;
+ }
+
+ Environment* environment() { return builder_->environment(); }
+ AstGraphBuilder* builder() const { return builder_; }
+ int stack_delta() const { return stack_delta_; }
+
+ private:
+ AstGraphBuilder* builder_;
+ ControlScope* outer_;
+ int stack_delta_;
+};
+
+
+// Helper class for a try-finally control scope. It can record intercepted
+// control-flow commands that cause entry into a finally-block, and re-apply
+// them after again leaving that block. Special tokens are used to identify
+// paths going through the finally-block to dispatch after leaving the block.
+class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
+ public:
+ explicit DeferredCommands(AstGraphBuilder* owner)
+ : owner_(owner), deferred_(owner->zone()) {}
+
+ // One recorded control-flow command.
+ struct Entry {
+ Command command; // The command type being applied on this path.
+ Statement* statement; // The target statement for the command or {NULL}.
+ Node* value; // The passed value node for the command or {NULL}.
+ Node* token; // A token identifying this particular path.
+ };
+
+ // Records a control-flow command while entering the finally-block. This also
+ // generates a new dispatch token that identifies one particular path.
+ Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
+ Node* token = NewPathTokenForDeferredCommand();
+ deferred_.push_back({cmd, stmt, value, token});
+ return token;
+ }
+
+ // Returns the dispatch token to be used to identify the implicit fall-through
+ // path at the end of a try-block into the corresponding finally-block.
+ Node* GetFallThroughToken() { return NewPathTokenForImplicitFallThrough(); }
+
+ // Applies all recorded control-flow commands after the finally-block again.
+ // This generates a dynamic dispatch on the token from the entry point.
+ void ApplyDeferredCommands(Node* token) {
+ SwitchBuilder dispatch(owner_, static_cast<int>(deferred_.size()));
+ dispatch.BeginSwitch();
+ for (size_t i = 0; i < deferred_.size(); ++i) {
+ Node* condition = NewPathDispatchCondition(token, deferred_[i].token);
+ dispatch.BeginLabel(static_cast<int>(i), condition);
+ dispatch.EndLabel();
+ }
+ for (size_t i = 0; i < deferred_.size(); ++i) {
+ dispatch.BeginCase(static_cast<int>(i));
+ owner_->execution_control()->PerformCommand(
+ deferred_[i].command, deferred_[i].statement, deferred_[i].value);
+ dispatch.EndCase();
+ }
+ dispatch.EndSwitch();
+ }
+
+ protected:
+ Node* NewPathTokenForDeferredCommand() {
+ return owner_->jsgraph()->Constant(static_cast<int>(deferred_.size()));
+ }
+ Node* NewPathTokenForImplicitFallThrough() {
+ return owner_->jsgraph()->Constant(-1);
+ }
+ Node* NewPathDispatchCondition(Node* t1, Node* t2) {
+ // TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
+ // nodes all have kRepTagged|kTypeAny, which causes representation mismatch.
+ return owner_->NewNode(owner_->javascript()->StrictEqual(), t1, t2);
+ }
+
+ private:
+ AstGraphBuilder* owner_;
+ ZoneVector<Entry> deferred_;
+};
+
+
+// Control scope implementation for a BreakableStatement.
+class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
+ public:
+ ControlScopeForBreakable(AstGraphBuilder* owner, BreakableStatement* target,
+ ControlBuilder* control)
+ : ControlScope(owner, 0), target_(target), control_(control) {}
+
+ protected:
+ virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
+ if (target != target_) return false; // We are not the command target.
+ switch (cmd) {
+ case CMD_BREAK:
+ control_->Break();
+ return true;
+ case CMD_CONTINUE:
+ case CMD_THROW:
+ case CMD_RETURN:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ BreakableStatement* target_;
+ ControlBuilder* control_;
+};
+
+
+// Control scope implementation for an IterationStatement.
+class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
+ public:
+ ControlScopeForIteration(AstGraphBuilder* owner, IterationStatement* target,
+ LoopBuilder* control, int stack_delta)
+ : ControlScope(owner, stack_delta), target_(target), control_(control) {}
+
+ protected:
+ virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
+ if (target != target_) return false; // We are not the command target.
+ switch (cmd) {
+ case CMD_BREAK:
+ control_->Break();
+ return true;
+ case CMD_CONTINUE:
+ control_->Continue();
+ return true;
+ case CMD_THROW:
+ case CMD_RETURN:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ BreakableStatement* target_;
+ LoopBuilder* control_;
+};
+
+
+// Control scope implementation for a TryCatchStatement.
+class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
+ public:
+ ControlScopeForCatch(AstGraphBuilder* owner, TryCatchBuilder* control)
+ : ControlScope(owner, 0), control_(control) {}
+
+ protected:
+ virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
+ switch (cmd) {
+ case CMD_THROW:
+ control_->Throw(value);
+ return true;
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ case CMD_RETURN:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ TryCatchBuilder* control_;
+};
+
+
+// Control scope implementation for a TryFinallyStatement.
+class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
+ public:
+ ControlScopeForFinally(AstGraphBuilder* owner, DeferredCommands* commands,
+ TryFinallyBuilder* control)
+ : ControlScope(owner, 0), commands_(commands), control_(control) {}
+
+ protected:
+ virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
+ Node* token = commands_->RecordCommand(cmd, target, value);
+ control_->LeaveTry(token);
+ return true;
+ }
+
+ private:
+ DeferredCommands* commands_;
+ TryFinallyBuilder* control_;
+};
+
+
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
- : StructuredGraphBuilder(local_zone, jsgraph->graph(), jsgraph->common()),
+ : local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
+ environment_(nullptr),
+ ast_context_(nullptr),
globals_(0, local_zone),
- breakable_(NULL),
- execution_context_(NULL),
+ execution_control_(nullptr),
+ execution_context_(nullptr),
+ input_buffer_size_(0),
+ input_buffer_(nullptr),
+ exit_control_(nullptr),
loop_assignment_analysis_(loop) {
- InitializeAstVisitor(local_zone);
+ InitializeAstVisitor(info->isolate(), local_zone);
}
Node* AstGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
- // Parameter -1 is special for the function closure
- const Operator* op = common()->Parameter(-1);
+ const Operator* op =
+ common()->Parameter(Linkage::kJSFunctionCallClosureParamIndex);
Node* node = NewNode(op, graph()->start());
function_closure_.set(node);
}
@@ -43,18 +398,29 @@ Node* AstGraphBuilder::GetFunctionClosure() {
}
-Node* AstGraphBuilder::GetFunctionContext() {
- if (!function_context_.is_set()) {
- // Parameter (arity + 1) is special for the outer context of the function
- const Operator* op = common()->Parameter(info()->num_parameters() + 1);
- Node* node = NewNode(op, graph()->start());
- function_context_.set(node);
- }
- return function_context_.get();
+void AstGraphBuilder::CreateFunctionContext(bool constant_context) {
+ function_context_.set(constant_context
+ ? jsgraph()->HeapConstant(info()->context())
+ : NewOuterContextParam());
+}
+
+
+Node* AstGraphBuilder::NewOuterContextParam() {
+ // Parameter (arity + 1) is special for the outer context of the function
+ const Operator* op = common()->Parameter(info()->num_parameters() + 1);
+ return NewNode(op, graph()->start());
}
-bool AstGraphBuilder::CreateGraph() {
+Node* AstGraphBuilder::NewCurrentContextOsrValue() {
+ // TODO(titzer): use a real OSR value here; a parameter works by accident.
+ // Parameter (arity + 1) is special for the outer context of the function
+ const Operator* op = common()->Parameter(info()->num_parameters() + 1);
+ return NewNode(op, graph()->start());
+}
+
+
+bool AstGraphBuilder::CreateGraph(bool constant_context) {
Scope* scope = info()->scope();
DCHECK(graph() != NULL);
@@ -62,13 +428,22 @@ bool AstGraphBuilder::CreateGraph() {
int parameter_count = info()->num_parameters();
graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
+ // Initialize control scope.
+ ControlScope control(this, 0);
+
// Initialize the top-level environment.
Environment env(this, scope, graph()->start());
set_environment(&env);
+ if (info()->is_osr()) {
+ // Use OSR normal entry as the start of the top-level environment.
+ // It will be replaced with {Dead} after typing and optimizations.
+ NewNode(common()->OsrNormalEntry());
+ }
+
// Initialize the incoming context.
- Node* outer_context = GetFunctionContext();
- set_current_context(outer_context);
+ CreateFunctionContext(constant_context);
+ ContextScope incoming(this, scope, function_context_.get());
// Build receiver check for sloppy mode if necessary.
// TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
@@ -76,16 +451,39 @@ bool AstGraphBuilder::CreateGraph() {
Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
env.Bind(scope->receiver(), patched_receiver);
- // Build node to initialize local function context.
- Node* closure = GetFunctionClosure();
- Node* inner_context = BuildLocalFunctionContext(outer_context, closure);
+ // Build function context only if there are context allocated variables.
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Push a new inner context scope for the function.
+ Node* closure = GetFunctionClosure();
+ Node* inner_context =
+ BuildLocalFunctionContext(function_context_.get(), closure);
+ ContextScope top_context(this, scope, inner_context);
+ CreateGraphBody();
+ } else {
+ // Simply use the outer function context in building the graph.
+ CreateGraphBody();
+ }
- // Push top-level function scope for the function body.
- ContextScope top_context(this, scope, inner_context);
+ // Finish the basic structure of the graph.
+ graph()->SetEnd(graph()->NewNode(common()->End(), exit_control()));
+
+ // Failures indicated by stack overflow.
+ return !HasStackOverflow();
+}
+
+
+void AstGraphBuilder::CreateGraphBody() {
+ Scope* scope = info()->scope();
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
+ // Build rest arguments array if it is used.
+ int rest_index;
+ Variable* rest_parameter = scope->rest_parameter(&rest_index);
+ BuildRestArgumentsArray(rest_parameter, rest_index);
+
// Emit tracing call if requested to do so.
if (FLAG_trace) {
NewNode(javascript()->CallRuntime(Runtime::kTraceEnter, 0));
@@ -105,7 +503,6 @@ bool AstGraphBuilder::CreateGraph() {
// Visit statements in the function body.
VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return false;
// Emit tracing call if requested to do so.
if (FLAG_trace) {
@@ -115,14 +512,7 @@ bool AstGraphBuilder::CreateGraph() {
}
// Return 'undefined' in case we can fall off the end.
- Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant());
- UpdateControlDependencyToLeaveFunction(control);
-
- // Finish the basic structure of the graph.
- environment()->UpdateControlDependency(exit_control());
- graph()->SetEnd(NewNode(common()->End()));
-
- return true;
+ BuildReturn(jsgraph()->UndefinedConstant());
}
@@ -142,21 +532,19 @@ static LhsKind DetermineLhsKind(Expression* expr) {
}
-StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
- StructuredGraphBuilder::Environment* env) {
- return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
-}
-
-
AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
Scope* scope,
Node* control_dependency)
- : StructuredGraphBuilder::Environment(builder, control_dependency),
+ : builder_(builder),
parameters_count_(scope->num_parameters() + 1),
locals_count_(scope->num_stack_slots()),
- parameters_node_(NULL),
- locals_node_(NULL),
- stack_node_(NULL) {
+ values_(builder_->local_zone()),
+ contexts_(builder_->local_zone()),
+ control_dependency_(control_dependency),
+ effect_dependency_(control_dependency),
+ parameters_node_(nullptr),
+ locals_node_(nullptr),
+ stack_node_(nullptr) {
DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
// Bind the receiver variable.
@@ -178,14 +566,25 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
}
-AstGraphBuilder::Environment::Environment(const Environment& copy)
- : StructuredGraphBuilder::Environment(
- static_cast<StructuredGraphBuilder::Environment>(copy)),
- parameters_count_(copy.parameters_count_),
- locals_count_(copy.locals_count_),
- parameters_node_(copy.parameters_node_),
- locals_node_(copy.locals_node_),
- stack_node_(copy.stack_node_) {}
+AstGraphBuilder::Environment::Environment(
+ const AstGraphBuilder::Environment* copy)
+ : builder_(copy->builder_),
+ parameters_count_(copy->parameters_count_),
+ locals_count_(copy->locals_count_),
+ values_(copy->zone()),
+ contexts_(copy->zone()),
+ control_dependency_(copy->control_dependency_),
+ effect_dependency_(copy->effect_dependency_),
+ parameters_node_(copy->parameters_node_),
+ locals_node_(copy->locals_node_),
+ stack_node_(copy->stack_node_) {
+ const size_t kStackEstimate = 7; // optimum from experimentation!
+ values_.reserve(copy->values_.size() + kStackEstimate);
+ values_.insert(values_.begin(), copy->values_.begin(), copy->values_.end());
+ contexts_.reserve(copy->contexts_.size());
+ contexts_.insert(contexts_.begin(), copy->contexts_.begin(),
+ copy->contexts_.end());
+}
void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
@@ -220,7 +619,7 @@ Node* AstGraphBuilder::Environment::Checkpoint(
const Operator* op = common()->FrameState(JS_FRAME, ast_id, combine);
return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_,
- GetContext(),
+ builder()->current_context(),
builder()->jsgraph()->UndefinedConstant());
}
@@ -283,25 +682,48 @@ Node* AstGraphBuilder::AstTestContext::ConsumeValue() {
}
-AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable(
- BreakableStatement* target) {
- BreakableScope* current = this;
- while (current != NULL && current->target_ != target) {
- owner_->environment()->Drop(current->drop_extra_);
- current = current->next_;
+Scope* AstGraphBuilder::current_scope() const {
+ return execution_context_->scope();
+}
+
+
+Node* AstGraphBuilder::current_context() const {
+ return environment()->Context();
+}
+
+
+void AstGraphBuilder::ControlScope::PerformCommand(Command command,
+ Statement* target,
+ Node* value) {
+ Environment* env = environment()->CopyAsUnreachable();
+ ControlScope* current = this;
+ while (current != NULL) {
+ if (current->Execute(command, target, value)) break;
+ environment()->Drop(current->stack_delta());
+ current = current->outer_;
}
- DCHECK(current != NULL); // Always found (unless stack is malformed).
- return current;
+ builder()->set_environment(env);
+ DCHECK(current != NULL); // Always handled (unless stack is malformed).
}
-void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) {
- FindBreakable(stmt)->control_->Break();
+void AstGraphBuilder::ControlScope::BreakTo(BreakableStatement* stmt) {
+ PerformCommand(CMD_BREAK, stmt, nullptr);
}
-void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) {
- FindBreakable(stmt)->control_->Continue();
+void AstGraphBuilder::ControlScope::ContinueTo(BreakableStatement* stmt) {
+ PerformCommand(CMD_CONTINUE, stmt, nullptr);
+}
+
+
+void AstGraphBuilder::ControlScope::ReturnValue(Node* return_value) {
+ PerformCommand(CMD_RETURN, nullptr, return_value);
+}
+
+
+void AstGraphBuilder::ControlScope::ThrowValue(Node* exception_value) {
+ PerformCommand(CMD_THROW, nullptr, exception_value);
}
@@ -313,6 +735,14 @@ void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
}
+void AstGraphBuilder::VisitForValueOrTheHole(Expression* expr) {
+ if (expr == NULL) {
+ return environment()->Push(jsgraph()->TheHoleConstant());
+ }
+ VisitForValue(expr);
+}
+
+
void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
VisitForValue(exprs->at(i));
@@ -443,11 +873,6 @@ void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); }
-void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) {
- UNREACHABLE();
-}
-
-
void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); }
@@ -456,18 +881,15 @@ void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); }
void AstGraphBuilder::VisitBlock(Block* stmt) {
BlockBuilder block(this);
- BreakableScope scope(this, stmt, &block, 0);
+ ControlScopeForBreakable scope(this, stmt, &block);
if (stmt->labels() != NULL) block.BeginBlock();
if (stmt->scope() == NULL) {
// Visit statements in the same scope, no declarations.
VisitStatements(stmt->statements());
} else {
- const Operator* op = javascript()->CreateBlockContext();
- Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
- Node* context = NewNode(op, scope_info, GetFunctionClosure());
- ContextScope scope(this, stmt->scope(), context);
-
// Visit declarations and statements in a block scope.
+ Node* context = BuildLocalBlockContext(stmt->scope());
+ ContextScope scope(this, stmt->scope(), context);
VisitDeclarations(stmt->scope()->declarations());
VisitStatements(stmt->statements());
}
@@ -504,24 +926,19 @@ void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
- breakable()->ContinueTarget(stmt->target());
- set_environment(env);
+ execution_control()->ContinueTo(stmt->target());
}
void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
- breakable()->BreakTarget(stmt->target());
- set_environment(env);
+ execution_control()->BreakTo(stmt->target());
}
void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
VisitForValue(stmt->expression());
Node* result = environment()->Pop();
- Node* control = NewNode(common()->Return(), result);
- UpdateControlDependencyToLeaveFunction(control);
+ execution_control()->ReturnValue(result);
}
@@ -530,6 +947,7 @@ void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
Node* value = environment()->Pop();
const Operator* op = javascript()->CreateWithContext();
Node* context = NewNode(op, value, GetFunctionClosure());
+ PrepareFrameState(context, stmt->EntryId());
ContextScope scope(this, stmt->scope(), context);
Visit(stmt->statement());
}
@@ -538,7 +956,7 @@ void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ZoneList<CaseClause*>* clauses = stmt->cases();
SwitchBuilder compare_switch(this, clauses->length());
- BreakableScope scope(this, stmt, &compare_switch, 0);
+ ControlScopeForBreakable scope(this, stmt, &compare_switch);
compare_switch.BeginSwitch();
int default_index = -1;
@@ -589,7 +1007,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder while_loop(this);
- while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
+ while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
VisitIterationBody(stmt, &while_loop, 0);
while_loop.EndBody();
VisitForTest(stmt->cond());
@@ -601,7 +1019,7 @@ void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
LoopBuilder while_loop(this);
- while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
+ while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
while_loop.BreakUnless(condition);
@@ -614,7 +1032,7 @@ void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
LoopBuilder for_loop(this);
VisitIfNotNull(stmt->init());
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
if (stmt->cond() != NULL) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
@@ -653,24 +1071,20 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
PrepareFrameState(obj, stmt->ToObjectId(), OutputFrameStateCombine::Push());
environment()->Push(obj);
// TODO(dcarney): should do a fast enum cache check here to skip runtime.
- environment()->Push(obj);
- Node* cache_type = ProcessArguments(
- javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), 1);
+ Node* cache_type = NewNode(
+ javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), obj);
PrepareFrameState(cache_type, stmt->EnumId(),
OutputFrameStateCombine::Push());
// TODO(dcarney): these next runtime calls should be removed in favour of
// a few simplified instructions.
- environment()->Push(obj);
- environment()->Push(cache_type);
- Node* cache_pair =
- ProcessArguments(javascript()->CallRuntime(Runtime::kForInInit, 2), 2);
+ Node* cache_pair = NewNode(
+ javascript()->CallRuntime(Runtime::kForInInit, 2), obj, cache_type);
// cache_type may have been replaced.
Node* cache_array = NewNode(common()->Projection(0), cache_pair);
cache_type = NewNode(common()->Projection(1), cache_pair);
- environment()->Push(cache_type);
- environment()->Push(cache_array);
- Node* cache_length = ProcessArguments(
- javascript()->CallRuntime(Runtime::kForInCacheArrayLength, 2), 2);
+ Node* cache_length =
+ NewNode(javascript()->CallRuntime(Runtime::kForInCacheArrayLength, 2),
+ cache_type, cache_array);
{
// TODO(dcarney): this check is actually supposed to be for the
// empty enum case only.
@@ -688,89 +1102,9 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
environment()->Push(cache_array);
environment()->Push(cache_length);
environment()->Push(jsgraph()->ZeroConstant());
- // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- LoopBuilder for_loop(this);
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
- // Check loop termination condition.
- Node* index = environment()->Peek(0);
- Node* exit_cond =
- NewNode(javascript()->LessThan(), index, cache_length);
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(exit_cond, BailoutId::None());
- for_loop.BreakUnless(exit_cond);
- // TODO(dcarney): this runtime call should be a handful of
- // simplified instructions that
- // basically produce
- // value = array[index]
- environment()->Push(obj);
- environment()->Push(cache_array);
- environment()->Push(cache_type);
- environment()->Push(index);
- Node* pair = ProcessArguments(
- javascript()->CallRuntime(Runtime::kForInNext, 4), 4);
- Node* value = NewNode(common()->Projection(0), pair);
- Node* should_filter = NewNode(common()->Projection(1), pair);
- environment()->Push(value);
- {
- // Test if FILTER_KEY needs to be called.
- IfBuilder test_should_filter(this);
- Node* should_filter_cond =
- NewNode(javascript()->StrictEqual(), should_filter,
- jsgraph()->TrueConstant());
- test_should_filter.If(should_filter_cond);
- test_should_filter.Then();
- value = environment()->Pop();
- Node* builtins = BuildLoadBuiltinsObject();
- Node* function = BuildLoadObjectField(
- builtins,
- JSBuiltinsObject::OffsetOfFunctionWithId(Builtins::FILTER_KEY));
- // Callee.
- environment()->Push(function);
- // Receiver.
- environment()->Push(obj);
- // Args.
- environment()->Push(value);
- // result is either the string key or Smi(0) indicating the property
- // is gone.
- Node* res = ProcessArguments(
- javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS), 3);
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(res, BailoutId::None());
- Node* property_missing = NewNode(javascript()->StrictEqual(), res,
- jsgraph()->ZeroConstant());
- {
- IfBuilder is_property_missing(this);
- is_property_missing.If(property_missing);
- is_property_missing.Then();
- // Inc counter and continue.
- Node* index_inc =
- NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(index_inc, BailoutId::None());
- environment()->Poke(0, index_inc);
- for_loop.Continue();
- is_property_missing.Else();
- is_property_missing.End();
- }
- // Replace 'value' in environment.
- environment()->Push(res);
- test_should_filter.Else();
- test_should_filter.End();
- }
- value = environment()->Pop();
- // Bind value and do loop body.
- VisitForInAssignment(stmt->each(), value);
- VisitIterationBody(stmt, &for_loop, 5);
- for_loop.EndBody();
- // Inc counter and continue.
- Node* index_inc =
- NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(index_inc, BailoutId::None());
- environment()->Poke(0, index_inc);
- for_loop.EndLoop();
- environment()->Drop(5);
- // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+
+ // Build the actual loop body.
+ VisitForInBody(stmt);
}
have_no_properties.End();
}
@@ -780,20 +1114,165 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
}
+// TODO(dcarney): this is a big function. Try to clean up some.
+void AstGraphBuilder::VisitForInBody(ForInStatement* stmt) {
+ LoopBuilder for_loop(this);
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
+
+ // These stack values are renamed in the case of OSR, so reload them
+ // from the environment.
+ Node* index = environment()->Peek(0);
+ Node* cache_length = environment()->Peek(1);
+ Node* cache_array = environment()->Peek(2);
+ Node* cache_type = environment()->Peek(3);
+ Node* obj = environment()->Peek(4);
+
+ // Check loop termination condition.
+ Node* exit_cond = NewNode(javascript()->LessThan(), index, cache_length);
+ // TODO(jarin): provide real bailout id.
+ PrepareFrameState(exit_cond, BailoutId::None());
+ for_loop.BreakUnless(exit_cond);
+ Node* pair = NewNode(javascript()->CallRuntime(Runtime::kForInNext, 4), obj,
+ cache_array, cache_type, index);
+ Node* value = NewNode(common()->Projection(0), pair);
+ Node* should_filter = NewNode(common()->Projection(1), pair);
+ environment()->Push(value);
+ {
+ // Test if FILTER_KEY needs to be called.
+ IfBuilder test_should_filter(this);
+ Node* should_filter_cond = NewNode(
+ javascript()->StrictEqual(), should_filter, jsgraph()->TrueConstant());
+ test_should_filter.If(should_filter_cond);
+ test_should_filter.Then();
+ value = environment()->Pop();
+ Node* builtins = BuildLoadBuiltinsObject();
+ Node* function = BuildLoadObjectField(
+ builtins,
+ JSBuiltinsObject::OffsetOfFunctionWithId(Builtins::FILTER_KEY));
+ // result is either the string key or Smi(0) indicating the property
+ // is gone.
+ Node* res = NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+ function, obj, value);
+ // TODO(jarin): provide real bailout id.
+ PrepareFrameState(res, BailoutId::None());
+ Node* property_missing =
+ NewNode(javascript()->StrictEqual(), res, jsgraph()->ZeroConstant());
+ {
+ IfBuilder is_property_missing(this);
+ is_property_missing.If(property_missing);
+ is_property_missing.Then();
+ // Inc counter and continue.
+ Node* index_inc =
+ NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+ // TODO(jarin): provide real bailout id.
+ PrepareFrameState(index_inc, BailoutId::None());
+ environment()->Poke(0, index_inc);
+ for_loop.Continue();
+ is_property_missing.Else();
+ is_property_missing.End();
+ }
+ // Replace 'value' in environment.
+ environment()->Push(res);
+ test_should_filter.Else();
+ test_should_filter.End();
+ }
+ value = environment()->Pop();
+ // Bind value and do loop body.
+ VisitForInAssignment(stmt->each(), value, stmt->AssignmentId());
+ VisitIterationBody(stmt, &for_loop, 5);
+ for_loop.EndBody();
+ // Inc counter and continue.
+ Node* index_inc =
+ NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+ // TODO(jarin): provide real bailout id.
+ PrepareFrameState(index_inc, BailoutId::None());
+ environment()->Poke(0, index_inc);
+ for_loop.EndLoop();
+ environment()->Drop(5);
+ // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
- VisitForValue(stmt->subject());
- environment()->Pop();
- // TODO(turbofan): create and use loop builder.
+ LoopBuilder for_loop(this);
+ VisitForEffect(stmt->assign_iterator());
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
+ VisitForEffect(stmt->next_result());
+ VisitForTest(stmt->result_done());
+ Node* condition = environment()->Pop();
+ for_loop.BreakWhen(condition);
+ VisitForEffect(stmt->assign_each());
+ VisitIterationBody(stmt, &for_loop, 0);
+ for_loop.EndBody();
+ for_loop.EndLoop();
}
void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
+ TryCatchBuilder try_control(this);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting 'throw' control commands.
+ try_control.BeginTry();
+ {
+ ControlScopeForCatch scope(this, &try_control);
+ Visit(stmt->try_block());
+ }
+ try_control.EndTry();
+
+ // Create a catch scope that binds the exception.
+ Node* exception = try_control.GetExceptionNode();
+ Unique<String> name = MakeUnique(stmt->variable()->name());
+ const Operator* op = javascript()->CreateCatchContext(name);
+ Node* context = NewNode(op, exception, GetFunctionClosure());
+ PrepareFrameState(context, BailoutId::None());
+ {
+ ContextScope scope(this, stmt->scope(), context);
+ DCHECK(stmt->scope()->declarations()->is_empty());
+ // Evaluate the catch-block.
+ Visit(stmt->catch_block());
+ }
+ try_control.EndCatch();
+
+ // TODO(mstarzinger): Remove bailout once everything works.
+ if (!FLAG_turbo_exceptions) SetStackOverflow();
}
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- UNREACHABLE();
+ TryFinallyBuilder try_control(this);
+
+ // We keep a record of all paths that enter the finally-block to be able to
+ // dispatch to the correct continuation point after the statements in the
+ // finally-block have been evaluated.
+ //
+ // The try-finally construct can enter the finally-block in three ways:
+ // 1. By exiting the try-block normally, falling through at the end.
+ // 2. By exiting the try-block with a function-local control flow transfer
+ // (i.e. through break/continue/return statements).
+ // 3. By exiting the try-block with a thrown exception.
+ ControlScope::DeferredCommands* commands =
+ new (zone()) ControlScope::DeferredCommands(this);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting all control commands.
+ try_control.BeginTry();
+ {
+ ControlScopeForFinally scope(this, commands, &try_control);
+ Visit(stmt->try_block());
+ }
+ try_control.EndTry(commands->GetFallThroughToken());
+
+ // Evaluate the finally-block.
+ Visit(stmt->finally_block());
+ try_control.EndFinally();
+
+ // Dynamic dispatch after the finally-block.
+ Node* token = try_control.GetDispatchTokenNode();
+ commands->ApplyDeferredCommands(token);
+
+ // TODO(mstarzinger): Remove bailout once everything works.
+ if (!FLAG_turbo_exceptions) SetStackOverflow();
}
@@ -826,7 +1305,104 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
- UNREACHABLE();
+ if (expr->scope() == NULL) {
+ // Visit class literal in the same scope, no declarations.
+ VisitClassLiteralContents(expr);
+ } else {
+ // Visit declarations and class literal in a block scope.
+ Node* context = BuildLocalBlockContext(expr->scope());
+ ContextScope scope(this, expr->scope(), context);
+ VisitDeclarations(expr->scope()->declarations());
+ VisitClassLiteralContents(expr);
+ }
+}
+
+
+void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
+ Node* class_name = expr->raw_name() ? jsgraph()->Constant(expr->name())
+ : jsgraph()->UndefinedConstant();
+
+ // The class name is expected on the operand stack.
+ environment()->Push(class_name);
+ VisitForValueOrTheHole(expr->extends());
+ VisitForValue(expr->constructor());
+
+ // Create node to instantiate a new class.
+ Node* constructor = environment()->Pop();
+ Node* extends = environment()->Pop();
+ Node* name = environment()->Pop();
+ Node* script = jsgraph()->Constant(info()->script());
+ Node* start = jsgraph()->Constant(expr->start_position());
+ Node* end = jsgraph()->Constant(expr->end_position());
+ const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass, 6);
+ Node* literal = NewNode(opc, name, extends, constructor, script, start, end);
+
+ // The prototype is ensured to exist by Runtime_DefineClass. No access check
+ // is needed here since the constructor is created by the class literal.
+ Node* proto =
+ BuildLoadObjectField(literal, JSFunction::kPrototypeOrInitialMapOffset);
+
+ // The class literal and the prototype are both expected on the operand stack
+ // during evaluation of the method values.
+ environment()->Push(literal);
+ environment()->Push(proto);
+
+ // Create nodes to store method values into the literal.
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ environment()->Push(property->is_static() ? literal : proto);
+
+ VisitForValue(property->key());
+ environment()->Push(
+ BuildToName(environment()->Pop(), expr->GetIdForProperty(i)));
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* key = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ BuildSetHomeObject(value, receiver, property->value());
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED: {
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kDefineClassMethod, 3);
+ NewNode(op, receiver, key, value);
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ Node* attr = jsgraph()->Constant(DONT_ENUM);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineGetterPropertyUnchecked, 4);
+ NewNode(op, receiver, key, value, attr);
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ Node* attr = jsgraph()->Constant(DONT_ENUM);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineSetterPropertyUnchecked, 4);
+ NewNode(op, receiver, key, value, attr);
+ break;
+ }
+ }
+ }
+
+ // Transform both the class literal and the prototype to fast properties.
+ const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties, 1);
+ NewNode(op, environment()->Pop()); // prototype
+ NewNode(op, environment()->Pop()); // literal
+
+ // Assign to class variable.
+ if (expr->scope() != NULL) {
+ DCHECK_NOT_NULL(expr->class_variable_proxy());
+ Variable* var = expr->class_variable_proxy()->var();
+ BuildVariableAssignment(var, literal, Token::INIT_CONST, BailoutId::None());
+ }
+
+ PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(literal);
}
@@ -905,12 +1481,14 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
// Create nodes to store computed values into the literal.
+ int property_index = 0;
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
@@ -925,9 +1503,11 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForValue(property->value());
Node* value = environment()->Pop();
Unique<Name> name = MakeUnique(key->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(strict_mode(), name),
- literal, value);
+ Node* store =
+ NewNode(javascript()->StoreNamed(language_mode(), name),
+ literal, value);
PrepareFrameState(store, key->id());
+ BuildSetHomeObject(value, literal, property->value());
} else {
VisitForEffect(property->value());
}
@@ -940,10 +1520,11 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* key = environment()->Pop();
Node* receiver = environment()->Pop();
if (property->emit_store()) {
- Node* strict = jsgraph()->Constant(SLOPPY);
+ Node* language = jsgraph()->Constant(SLOPPY);
const Operator* op =
javascript()->CallRuntime(Runtime::kSetProperty, 4);
- NewNode(op, receiver, key, value, strict);
+ NewNode(op, receiver, key, value, language);
+ BuildSetHomeObject(value, receiver, property->value());
}
break;
}
@@ -952,21 +1533,23 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
- if (property->emit_store()) {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
- Node* set_prototype = NewNode(op, receiver, value);
- // SetPrototype should not lazy deopt on an object
- // literal.
- PrepareFrameState(set_prototype, BailoutId::None());
- }
+ DCHECK(property->emit_store());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+ Node* set_prototype = NewNode(op, receiver, value);
+ // SetPrototype should not lazy deopt on an object literal.
+ PrepareFrameState(set_prototype, BailoutId::None());
break;
}
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = property->value();
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = property->value();
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = property->value();
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = property->value();
+ }
break;
}
}
@@ -977,7 +1560,9 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
it != accessor_table.end(); ++it) {
VisitForValue(it->first);
VisitForValueOrNull(it->second->getter);
+ BuildSetHomeObject(environment()->Top(), literal, it->second->getter);
VisitForValueOrNull(it->second->setter);
+ BuildSetHomeObject(environment()->Top(), literal, it->second->setter);
Node* setter = environment()->Pop();
Node* getter = environment()->Pop();
Node* name = environment()->Pop();
@@ -989,6 +1574,68 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareFrameState(call, BailoutId::None());
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
+ // with the first computed property name and continues with all properties to
+ // its right. All the code from above initializes the static component of the
+ // object literal, and arranges for the map of the result to reflect the
+ // static order in which the keys appear. For the dynamic properties, we
+ // compile them into a series of "SetOwnProperty" runtime calls. This will
+ // preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ environment()->Push(literal); // Duplicate receiver.
+ VisitForValue(property->key());
+ environment()->Push(BuildToName(environment()->Pop(),
+ expr->GetIdForProperty(property_index)));
+ // TODO(mstarzinger): For ObjectLiteral::Property::PROTOTYPE the key should
+ // not be on the operand stack while the value is being evaluated. Come up
+ // with a repro for this and fix it. Also find a nice way to do so. :)
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* key = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ BuildSetHomeObject(value, receiver, property->value());
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
+ Node* attr = jsgraph()->Constant(NONE);
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ Node* call = NewNode(op, receiver, key, value, attr);
+ PrepareFrameState(call, BailoutId::None());
+ break;
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+ Node* call = NewNode(op, receiver, value);
+ PrepareFrameState(call, BailoutId::None());
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ Node* attr = jsgraph()->Constant(NONE);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineGetterPropertyUnchecked, 4);
+ Node* call = NewNode(op, receiver, key, value, attr);
+ PrepareFrameState(call, BailoutId::None());
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ Node* attr = jsgraph()->Constant(NONE);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineSetterPropertyUnchecked, 4);
+ Node* call = NewNode(op, receiver, key, value, attr);
+ PrepareFrameState(call, BailoutId::None());
+ break;
+ }
+ }
+ }
+
// Transform literals that contain functions to fast properties.
if (expr->has_function()) {
const Operator* op =
@@ -1013,6 +1660,8 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
const Operator* op =
javascript()->CallRuntime(Runtime::kCreateArrayLiteral, 4);
Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+ PrepareFrameState(literal, expr->CreateLiteralId(),
+ OutputFrameStateCombine::Push());
// The array and the literal index are both expected on the operand stack
// during computation of the element values.
@@ -1028,7 +1677,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForValue(subexpr);
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(i);
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), literal,
+ Node* store = NewNode(javascript()->StoreProperty(language_mode()), literal,
index, value);
PrepareFrameState(store, expr->GetIdForElement(i));
}
@@ -1038,7 +1687,8 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
+void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
+ BailoutId bailout_id) {
DCHECK(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a variable slot.
@@ -1049,8 +1699,7 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- // TODO(jarin) Fill in the correct bailout id.
- BuildVariableAssignment(var, value, Token::ASSIGN, BailoutId::None());
+ BuildVariableAssignment(var, value, Token::ASSIGN, bailout_id);
break;
}
case NAMED_PROPERTY: {
@@ -1060,10 +1709,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
value = environment()->Pop();
Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store =
- NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
- // TODO(jarin) Fill in the correct bailout id.
- PrepareFrameState(store, BailoutId::None());
+ Node* store = NewNode(javascript()->StoreNamed(language_mode(), name),
+ object, value);
+ PrepareFrameState(store, bailout_id);
break;
}
case KEYED_PROPERTY: {
@@ -1073,10 +1721,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
- key, value);
- // TODO(jarin) Fill in the correct bailout id.
- PrepareFrameState(store, BailoutId::None());
+ Node* store = NewNode(javascript()->StoreProperty(language_mode()),
+ object, key, value);
+ PrepareFrameState(store, bailout_id);
break;
}
}
@@ -1164,8 +1811,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* object = environment()->Pop();
Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store =
- NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+ Node* store = NewNode(javascript()->StoreNamed(language_mode(), name),
+ object, value);
PrepareFrameState(store, expr->AssignmentId(),
ast_context()->GetStateCombine());
break;
@@ -1173,8 +1820,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
- key, value);
+ Node* store = NewNode(javascript()->StoreProperty(language_mode()),
+ object, key, value);
PrepareFrameState(store, expr->AssignmentId(),
ast_context()->GetStateCombine());
break;
@@ -1186,11 +1833,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
void AstGraphBuilder::VisitYield(Yield* expr) {
- VisitForValue(expr->generator_object());
- VisitForValue(expr->expression());
- environment()->Pop();
- environment()->Pop();
- // TODO(turbofan): VisitYield
+ // TODO(turbofan): Implement yield here.
+ SetStackOverflow();
ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
}
@@ -1198,10 +1842,16 @@ void AstGraphBuilder::VisitYield(Yield* expr) {
void AstGraphBuilder::VisitThrow(Throw* expr) {
VisitForValue(expr->exception());
Node* exception = environment()->Pop();
- const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
- Node* value = NewNode(op, exception);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
- ast_context()->ProduceValue(value);
+ if (FLAG_turbo_exceptions) {
+ execution_control()->ThrowValue(exception);
+ ast_context()->ProduceValue(exception);
+ } else {
+ // TODO(mstarzinger): Temporary workaround for bailout-id for debugger.
+ const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
+ Node* value = NewNode(op, exception);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
+ }
}
@@ -1282,11 +1932,12 @@ void AstGraphBuilder::VisitCall(Call* expr) {
flags = CALL_AS_METHOD;
break;
}
- case Call::SUPER_CALL: {
- // todo(dslomov): implement super calls in turbofan.
- UNIMPLEMENTED();
+ case Call::SUPER_CALL:
+ // TODO(dslomov): Implement super calls.
+ callee_value = jsgraph()->UndefinedConstant();
+ receiver_value = jsgraph()->UndefinedConstant();
+ SetStackOverflow();
break;
- }
case Call::POSSIBLY_EVAL_CALL:
possibly_eval = true;
// Fall through.
@@ -1319,12 +1970,12 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// provide a fully resolved callee and the corresponding receiver.
Node* function = GetFunctionClosure();
Node* receiver = environment()->Lookup(info()->scope()->receiver());
- Node* strict = jsgraph()->Constant(strict_mode());
+ Node* language = jsgraph()->Constant(language_mode());
Node* position = jsgraph()->Constant(info()->scope()->start_position());
const Operator* op =
javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
Node* pair =
- NewNode(op, callee, source, function, receiver, strict, position);
+ NewNode(op, callee, source, function, receiver, language, position);
PrepareFrameState(pair, expr->EvalOrLookupId(),
OutputFrameStateCombine::PokeAt(arg_count + 1));
Node* new_callee = NewNode(common()->Projection(0), pair);
@@ -1480,6 +2131,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Convert old value into a number.
old_value = NewNode(javascript()->ToNumber(), old_value);
+ PrepareFrameState(old_value, expr->ToNumberId(),
+ OutputFrameStateCombine::Push());
// Save result for postfix expressions at correct stack depth.
if (is_postfix) environment()->Poke(stack_depth, old_value);
@@ -1505,8 +2158,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* object = environment()->Pop();
Unique<Name> name =
MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store =
- NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+ Node* store = NewNode(javascript()->StoreNamed(language_mode(), name),
+ object, value);
environment()->Push(value);
PrepareFrameState(store, expr->AssignmentId());
environment()->Pop();
@@ -1515,8 +2168,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
- key, value);
+ Node* store = NewNode(javascript()->StoreProperty(language_mode()),
+ object, key, value);
environment()->Push(value);
PrepareFrameState(store, expr->AssignmentId());
environment()->Pop();
@@ -1605,7 +2258,9 @@ void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
void AstGraphBuilder::VisitSuperReference(SuperReference* expr) {
- UNREACHABLE();
+ // TODO(turbofan): Implement super here.
+ SetStackOverflow();
+ ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
}
@@ -1622,7 +2277,7 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsStrictMode::encode(strict_mode());
+ DeclareGlobalsLanguageMode::encode(language_mode());
Node* flags = jsgraph()->Constant(encoded_flags);
Node* pairs = jsgraph()->Constant(data);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 3);
@@ -1638,8 +2293,8 @@ void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
- LoopBuilder* loop, int drop_extra) {
- BreakableScope scope(this, stmt, loop, drop_extra);
+ LoopBuilder* loop, int stack_delta) {
+ ControlScopeForIteration scope(this, stmt, loop, stack_delta);
Visit(stmt->body());
}
@@ -1650,7 +2305,7 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
// Delete of an unqualified identifier is only allowed in classic mode but
// deleting "this" is allowed in all language modes.
Variable* variable = expr->expression()->AsVariableProxy()->var();
- DCHECK(strict_mode() == SLOPPY || variable->is_this());
+ DCHECK(is_sloppy(language_mode()) || variable->is_this());
value = BuildVariableDelete(variable, expr->id(),
ast_context()->GetStateCombine());
} else if (expr->expression()->IsProperty()) {
@@ -1659,7 +2314,7 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
VisitForValue(property->key());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key);
+ value = NewNode(javascript()->DeleteProperty(language_mode()), object, key);
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
} else {
VisitForEffect(expr->expression());
@@ -1735,8 +2390,8 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
}
-StrictMode AstGraphBuilder::strict_mode() const {
- return info()->strict_mode();
+LanguageMode AstGraphBuilder::language_mode() const {
+ return info()->language_mode();
}
@@ -1761,7 +2416,7 @@ Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object). Otherwise there is nothing left to do here.
- if (info()->strict_mode() != SLOPPY || info()->is_native()) return receiver;
+ if (is_strict(language_mode()) || info()->is_native()) return receiver;
// There is no need to perform patching if the receiver is never used. Note
// that scope predicates are purely syntactical, a call to eval might still
@@ -1785,13 +2440,9 @@ Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots <= 0) return context;
-
// Allocate a new local context.
const Operator* op = javascript()->CreateFunctionContext();
Node* local_context = NewNode(op, closure);
- set_current_context(local_context);
// Copy parameters into context if necessary.
int num_parameters = info()->scope()->num_parameters();
@@ -1811,6 +2462,18 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
}
+Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
+ Node* closure = GetFunctionClosure();
+
+ // Allocate a new local context.
+ const Operator* op = javascript()->CreateBlockContext();
+ Node* scope_info = jsgraph()->Constant(scope->GetScopeInfo(info_->isolate()));
+ Node* local_context = NewNode(op, scope_info, closure);
+
+ return local_context;
+}
+
+
Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
if (arguments == NULL) return NULL;
@@ -1828,6 +2491,22 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
}
+Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
+ if (rest == NULL) return NULL;
+
+ DCHECK(index >= 0);
+ const Operator* op = javascript()->CallRuntime(Runtime::kNewRestParamSlow, 1);
+ Node* object = NewNode(op, jsgraph()->SmiConstant(index));
+
+ // Assign the object to the rest array
+ DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
+ // This should never lazy deopt, so it is fine to send invalid bailout id.
+ BuildVariableAssignment(rest, object, Token::ASSIGN, BailoutId::None());
+
+ return object;
+}
+
+
Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
Node* not_hole) {
IfBuilder hole_check(this);
@@ -1944,7 +2623,7 @@ Node* AstGraphBuilder::BuildVariableDelete(
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Node* name = jsgraph()->Constant(variable->name());
- const Operator* op = javascript()->DeleteProperty(strict_mode());
+ const Operator* op = javascript()->DeleteProperty(language_mode());
Node* result = NewNode(op, global, name);
PrepareFrameState(result, bailout_id, state_combine);
return result;
@@ -1979,7 +2658,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Unique<Name> name = MakeUnique(variable->name());
- const Operator* op = javascript()->StoreNamed(strict_mode(), name);
+ const Operator* op = javascript()->StoreNamed(language_mode(), name);
Node* store = NewNode(op, global, value);
PrepareFrameState(store, bailout_id, combine);
return store;
@@ -1997,7 +2676,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Non-initializing assignments to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
return BuildThrowConstAssignError(bailout_id);
}
return value;
@@ -2031,7 +2710,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Non-initializing assignments to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
return BuildThrowConstAssignError(bailout_id);
}
return value;
@@ -2051,12 +2730,12 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case Variable::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Node* name = jsgraph()->Constant(variable->name());
- Node* strict = jsgraph()->Constant(strict_mode());
+ Node* language = jsgraph()->Constant(language_mode());
// TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
// initializations of const declarations.
const Operator* op =
javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
- Node* store = NewNode(op, value, current_context(), name, strict);
+ Node* store = NewNode(op, value, current_context(), name, language);
PrepareFrameState(store, bailout_id, combine);
return store;
}
@@ -2082,10 +2761,9 @@ Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
Node* AstGraphBuilder::BuildLoadGlobalObject() {
- Node* context = GetFunctionContext();
const Operator* load_op =
javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
- return NewNode(load_op, context);
+ return NewNode(load_op, function_context_.get());
}
@@ -2098,7 +2776,7 @@ Node* AstGraphBuilder::BuildLoadGlobalProxy() {
Node* AstGraphBuilder::BuildToBoolean(Node* input) {
- // TODO(titzer): this should be in a JSOperatorReducer.
+ // TODO(titzer): This should be in a JSOperatorReducer.
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
return jsgraph_->BooleanConstant(!Int32Matcher(input).Is(0));
@@ -2108,10 +2786,7 @@ Node* AstGraphBuilder::BuildToBoolean(Node* input) {
return jsgraph_->BooleanConstant(!NumberMatcher(input).Is(0));
case IrOpcode::kHeapConstant: {
Handle<Object> object = HeapObjectMatcher<Object>(input).Value().handle();
- if (object->IsTrue()) return jsgraph_->TrueConstant();
- if (object->IsFalse()) return jsgraph_->FalseConstant();
- // TODO(turbofan): other constants.
- break;
+ return jsgraph_->BooleanConstant(object->BooleanValue());
}
default:
break;
@@ -2125,6 +2800,27 @@ Node* AstGraphBuilder::BuildToBoolean(Node* input) {
}
+Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
+ // TODO(turbofan): Possible optimization is to NOP on name constants. But the
+ // same caveat as with BuildToBoolean applies, and it should be factored out
+ // into a JSOperatorReducer.
+ Node* name = NewNode(javascript()->ToName(), input);
+ PrepareFrameState(name, bailout_id);
+ return name;
+}
+
+
+Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
+ Expression* expr) {
+ if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
+ Unique<Name> name = MakeUnique(isolate()->factory()->home_object_symbol());
+ const Operator* op = javascript()->StoreNamed(language_mode(), name);
+ Node* store = NewNode(op, value, home_object);
+ PrepareFrameState(store, BailoutId::None());
+ return store;
+}
+
+
Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
BailoutId bailout_id) {
// TODO(mstarzinger): Should be unified with the VisitThrow implementation.
@@ -2147,6 +2843,21 @@ Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
}
+Node* AstGraphBuilder::BuildReturn(Node* return_value) {
+ Node* control = NewNode(common()->Return(), return_value);
+ UpdateControlDependencyToLeaveFunction(control);
+ return control;
+}
+
+
+Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
+ NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), exception_value);
+ Node* control = NewNode(common()->Throw(), exception_value);
+ UpdateControlDependencyToLeaveFunction(control);
+ return control;
+}
+
+
Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
const Operator* js_op;
switch (op) {
@@ -2209,6 +2920,16 @@ Node* AstGraphBuilder::BuildStackCheck() {
}
+bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
+ if (info()->osr_ast_id() == stmt->OsrEntryId()) {
+ info()->set_osr_expr_stack_height(std::max(
+ environment()->stack_height(), info()->osr_expr_stack_height()));
+ return true;
+ }
+ return false;
+}
+
+
void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
OutputFrameStateCombine combine) {
if (OperatorProperties::HasFrameStateInput(node->op())) {
@@ -2226,6 +2947,271 @@ BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
return loop_assignment_analysis_->GetVariablesAssignedInLoop(stmt);
}
+
+Node** AstGraphBuilder::EnsureInputBufferSize(int size) {
+ if (size > input_buffer_size_) {
+ size = size + kInputBufferSizeIncrement + input_buffer_size_;
+ input_buffer_ = local_zone()->NewArray<Node*>(size);
+ input_buffer_size_ = size;
+ }
+ return input_buffer_;
+}
+
+
+Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs, bool incomplete) {
+ DCHECK(op->ValueInputCount() == value_input_count);
+
+ bool has_context = OperatorProperties::HasContextInput(op);
+ bool has_framestate = OperatorProperties::HasFrameStateInput(op);
+ bool has_control = op->ControlInputCount() == 1;
+ bool has_effect = op->EffectInputCount() == 1;
+
+ DCHECK(op->ControlInputCount() < 2);
+ DCHECK(op->EffectInputCount() < 2);
+
+ Node* result = NULL;
+ if (!has_context && !has_framestate && !has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
+ } else {
+ int input_count_with_deps = value_input_count;
+ if (has_context) ++input_count_with_deps;
+ if (has_framestate) ++input_count_with_deps;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ Node** buffer = EnsureInputBufferSize(input_count_with_deps);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_context) {
+ *current_input++ = current_context();
+ }
+ if (has_framestate) {
+ // The frame state will be inserted later. Here we misuse
+ // the {DeadControl} node as a sentinel to be later overwritten
+ // with the real frame state.
+ *current_input++ = jsgraph()->DeadControl();
+ }
+ if (has_effect) {
+ *current_input++ = environment_->GetEffectDependency();
+ }
+ if (has_control) {
+ *current_input++ = environment_->GetControlDependency();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
+ if (has_effect) {
+ environment_->UpdateEffectDependency(result);
+ }
+ if (result->op()->ControlOutputCount() > 0 &&
+ !environment()->IsMarkedAsUnreachable()) {
+ environment_->UpdateControlDependency(result);
+ }
+ }
+
+ return result;
+}
+
+
+void AstGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
+ if (environment()->IsMarkedAsUnreachable()) return;
+ if (exit_control() != NULL) {
+ exit = MergeControl(exit_control(), exit);
+ }
+ environment()->MarkAsUnreachable();
+ set_exit_control(exit);
+}
+
+
+void AstGraphBuilder::Environment::Merge(Environment* other) {
+ DCHECK(values_.size() == other->values_.size());
+ // TODO(titzer): make context stack heights match.
+ DCHECK(contexts_.size() <= other->contexts_.size());
+
+ // Nothing to do if the other environment is dead.
+ if (other->IsMarkedAsUnreachable()) return;
+
+ // Resurrect a dead environment by copying the contents of the other one and
+ // placing a singleton merge as the new control dependency.
+ if (this->IsMarkedAsUnreachable()) {
+ Node* other_control = other->control_dependency_;
+ Node* inputs[] = {other_control};
+ control_dependency_ =
+ graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
+ effect_dependency_ = other->effect_dependency_;
+ values_ = other->values_;
+ // TODO(titzer): make context stack heights match.
+ size_t min = std::min(contexts_.size(), other->contexts_.size());
+ contexts_ = other->contexts_;
+ contexts_.resize(min, nullptr);
+ return;
+ }
+
+ // Create a merge of the control dependencies of both environments and update
+ // the current environment's control dependency accordingly.
+ Node* control = builder_->MergeControl(this->GetControlDependency(),
+ other->GetControlDependency());
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect dependencies of both environments and update
+ // the current environment's effect dependency accordingly.
+ Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
+ other->GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Introduce Phi nodes for values that have differing input at merge points,
+ // potentially extending an existing Phi node if possible.
+ for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
+ values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
+ }
+ for (int i = 0; i < static_cast<int>(contexts_.size()); ++i) {
+ contexts_[i] =
+ builder_->MergeValue(contexts_[i], other->contexts_[i], control);
+ }
+}
+
+
+void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
+ bool is_osr) {
+ int size = static_cast<int>(values()->size());
+
+ Node* control = builder_->NewLoop();
+ if (assigned == nullptr) {
+ // Assume that everything is updated in the loop.
+ for (int i = 0; i < size; ++i) {
+ values()->at(i) = builder_->NewPhi(1, values()->at(i), control);
+ }
+ } else {
+ // Only build phis for those locals assigned in this loop.
+ for (int i = 0; i < size; ++i) {
+ if (i < assigned->length() && !assigned->Contains(i)) continue;
+ Node* phi = builder_->NewPhi(1, values()->at(i), control);
+ values()->at(i) = phi;
+ }
+ }
+ Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ if (builder_->info()->is_osr()) {
+ // Introduce phis for all context values in the case of an OSR graph.
+ for (int i = 0; i < static_cast<int>(contexts()->size()); ++i) {
+ Node* val = contexts()->at(i);
+ if (!IrOpcode::IsConstantOpcode(val->opcode())) {
+ contexts()->at(i) = builder_->NewPhi(1, val, control);
+ }
+ }
+ }
+
+ if (is_osr) {
+ // Merge OSR values as inputs to the phis of the loop.
+ Graph* graph = builder_->graph();
+ Node* osr_loop_entry = builder_->graph()->NewNode(
+ builder_->common()->OsrLoopEntry(), graph->start(), graph->start());
+
+ builder_->MergeControl(control, osr_loop_entry);
+ builder_->MergeEffect(effect, osr_loop_entry, control);
+
+ for (int i = 0; i < size; ++i) {
+ Node* val = values()->at(i);
+ if (!IrOpcode::IsConstantOpcode(val->opcode())) {
+ Node* osr_value =
+ graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
+ values()->at(i) = builder_->MergeValue(val, osr_value, control);
+ }
+ }
+
+ // Rename all the contexts in the environment.
+ // The innermost context is the OSR value, and the outer contexts are
+ // reconstructed by dynamically walking up the context chain.
+ Node* osr_context = nullptr;
+ const Operator* op =
+ builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
+ int last = static_cast<int>(contexts()->size() - 1);
+ for (int i = last; i >= 0; i--) {
+ Node* val = contexts()->at(i);
+ if (!IrOpcode::IsConstantOpcode(val->opcode())) {
+ osr_context = (i == last) ? builder_->NewCurrentContextOsrValue()
+ : graph->NewNode(op, osr_context, osr_context,
+ osr_loop_entry);
+ contexts()->at(i) = builder_->MergeValue(val, osr_context, control);
+ } else {
+ osr_context = val;
+ }
+ }
+ }
+}
+
+
+Node* AstGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+ const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+// TODO(mstarzinger): Revisit this once we have proper effect states.
+Node* AstGraphBuilder::NewEffectPhi(int count, Node* input, Node* control) {
+ const Operator* phi_op = common()->EffectPhi(count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+Node* AstGraphBuilder::MergeControl(Node* control, Node* other) {
+ int inputs = control->op()->ControlInputCount() + 1;
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Control node for loop exists, add input.
+ const Operator* op = common()->Loop(inputs);
+ control->AppendInput(graph_zone(), other);
+ control->set_op(op);
+ } else if (control->opcode() == IrOpcode::kMerge) {
+ // Control node for merge exists, add input.
+ const Operator* op = common()->Merge(inputs);
+ control->AppendInput(graph_zone(), other);
+ control->set_op(op);
+ } else {
+ // Control node is a singleton, introduce a merge.
+ const Operator* op = common()->Merge(inputs);
+ Node* inputs[] = {control, other};
+ control = graph()->NewNode(op, arraysize(inputs), inputs, true);
+ }
+ return control;
+}
+
+
+Node* AstGraphBuilder::MergeEffect(Node* value, Node* other, Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->set_op(common()->EffectPhi(inputs));
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewEffectPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->set_op(common()->Phi(kMachAnyTagged, inputs));
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 0337c813b9..ebeb6c613c 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -5,63 +5,191 @@
#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
#define V8_COMPILER_AST_GRAPH_BUILDER_H_
-#include "src/v8.h"
-
#include "src/ast.h"
-#include "src/compiler/graph-builder.h"
#include "src/compiler/js-graph.h"
namespace v8 {
namespace internal {
+
+class BitVector;
+
namespace compiler {
class ControlBuilder;
class Graph;
class LoopAssignmentAnalysis;
class LoopBuilder;
+class Node;
// The AstGraphBuilder produces a high-level IR graph, based on an
// underlying AST. The produced graph can either be compiled into a
// stand-alone function or be wired into another graph for the purposes
// of function inlining.
-class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
+class AstGraphBuilder : public AstVisitor {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment = NULL);
// Creates a graph by visiting the entire AST.
- bool CreateGraph();
+ bool CreateGraph(bool constant_context);
+
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+ Node* NewLoop() { return NewNode(common()->Loop(1), true); }
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
+ }
protected:
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
+ // Visiting functions for AST nodes make this an AstVisitor.
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ // Visiting function for declarations list is overridden.
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
+
+ private:
class AstContext;
class AstEffectContext;
class AstValueContext;
class AstTestContext;
- class BreakableScope;
class ContextScope;
+ class ControlScope;
+ class ControlScopeForBreakable;
+ class ControlScopeForIteration;
+ class ControlScopeForCatch;
+ class ControlScopeForFinally;
class Environment;
+ friend class ControlBuilder;
- Environment* environment() {
- return reinterpret_cast<Environment*>(
- StructuredGraphBuilder::environment());
- }
+ Zone* local_zone_;
+ CompilationInfo* info_;
+ JSGraph* jsgraph_;
+ Environment* environment_;
+ AstContext* ast_context_;
+ // List of global declarations for functions and variables.
+ ZoneVector<Handle<Object>> globals_;
+
+ // Stack of control scopes currently entered by the visitor.
+ ControlScope* execution_control_;
+
+ // Stack of context objects pushed onto the chain by the visitor.
+ ContextScope* execution_context_;
+
+ // Nodes representing values in the activation record.
+ SetOncePointer<Node> function_closure_;
+ SetOncePointer<Node> function_context_;
+
+ // Temporary storage for building node input lists.
+ int input_buffer_size_;
+ Node** input_buffer_;
+
+ // Merge of all control nodes that exit the function body.
+ Node* exit_control_;
+
+ // Result of loop assignment analysis performed before graph creation.
+ LoopAssignmentAnalysis* loop_assignment_analysis_;
+
+ // Growth increment for the temporary buffer used to construct input lists to
+ // new nodes.
+ static const int kInputBufferSizeIncrement = 64;
+
+ Zone* local_zone() const { return local_zone_; }
+ Environment* environment() const { return environment_; }
AstContext* ast_context() const { return ast_context_; }
- BreakableScope* breakable() const { return breakable_; }
+ ControlScope* execution_control() const { return execution_control_; }
ContextScope* execution_context() const { return execution_context_; }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ CompilationInfo* info() const { return info_; }
+ LanguageMode language_mode() const;
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ Zone* graph_zone() { return graph()->zone(); }
+ JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+ ZoneVector<Handle<Object>>* globals() { return &globals_; }
+ Scope* current_scope() const;
+ Node* current_context() const;
+ Node* exit_control() const { return exit_control_; }
+ void set_environment(Environment* env) { environment_ = env; }
void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
- void set_breakable(BreakableScope* brk) { breakable_ = brk; }
+ void set_execution_control(ControlScope* ctrl) { execution_control_ = ctrl; }
void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
+ void set_exit_control(Node* exit) { exit_control_ = exit; }
+
+ // Create the main graph body by visiting the AST.
+ void CreateGraphBody();
- // Support for control flow builders. The concrete type of the environment
- // depends on the graph builder, but environments themselves are not virtual.
- typedef StructuredGraphBuilder::Environment BaseEnvironment;
- BaseEnvironment* CopyEnvironment(BaseEnvironment* env) OVERRIDE;
+ // Create the node that represents the outer context of the function.
+ void CreateFunctionContext(bool constant_context);
- // Getters for values in the activation record.
+ // Get or create the node that represents the outer function closure.
Node* GetFunctionClosure();
- Node* GetFunctionContext();
+
+ // Node creation helpers.
+ Node* NewNode(const Operator* op, bool incomplete = false) {
+ return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1) {
+ return MakeNode(op, 1, &n1, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* buffer[] = {n1, n2, n3, n4, n5};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, arraysize(nodes), nodes, false);
+ }
+
+ Node* NewNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete = false) {
+ return MakeNode(op, value_input_count, value_inputs, incomplete);
+ }
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ Node* NewOuterContextParam();
+ Node* NewCurrentContextOsrValue();
+
+ // Helpers for merging control, effect or value dependencies.
+ Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* value, Node* other, Node* control);
+ Node* MergeValue(Node* value, Node* other, Node* control);
+
+ // The main node creation chokepoint. Adds context, frame state, effect,
+ // and control dependencies depending on the operator.
+ Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete);
+
+ // Helper to indicate a node exits the function body.
+ void UpdateControlDependencyToLeaveFunction(Node* exit);
//
// The following build methods all generate graph fragments and return one
@@ -72,12 +200,16 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Builder to create a receiver check for sloppy mode.
Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
- // Builder to create a local function context.
+ // Builders to create local function and block contexts.
Node* BuildLocalFunctionContext(Node* context, Node* closure);
+ Node* BuildLocalBlockContext(Scope* scope);
// Builder to create an arguments object if it is used.
Node* BuildArgumentsObject(Variable* arguments);
+ // Builder to create an array of rest parameters if used
+ Node* BuildRestArgumentsArray(Variable* rest, int index);
+
// Builders for variable load and assignment.
Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
BailoutId bailout_id,
@@ -98,6 +230,11 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Builders for automatic type conversion.
Node* BuildToBoolean(Node* value);
+ Node* BuildToName(Node* value, BailoutId bailout_id);
+
+ // Builder for adding the [[HomeObject]] to a value if the value came from a
+ // function literal and needs a home object. Do nothing otherwise.
+ Node* BuildSetHomeObject(Node* value, Node* home_object, Expression* expr);
// Builders for error reporting at runtime.
Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
@@ -108,49 +245,27 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole,
BailoutId bailout_id);
+ // Builders for non-local control flow.
+ Node* BuildReturn(Node* return_value);
+ Node* BuildThrow(Node* exception_value);
+
// Builders for binary operations.
Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
// Builder for stack-check guards.
Node* BuildStackCheck();
-#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
- // Visiting functions for AST nodes make this an AstVisitor.
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Visiting function for declarations list is overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
+ // Check if the given statement is an OSR entry.
+ // If so, record the stack height into the compilation and return {true}.
+ bool CheckOsrEntry(IterationStatement* stmt);
- private:
- CompilationInfo* info_;
- AstContext* ast_context_;
- JSGraph* jsgraph_;
-
- // List of global declarations for functions and variables.
- ZoneVector<Handle<Object>> globals_;
-
- // Stack of breakable statements entered by the visitor.
- BreakableScope* breakable_;
-
- // Stack of context objects pushed onto the chain by the visitor.
- ContextScope* execution_context_;
-
- // Nodes representing values in the activation record.
- SetOncePointer<Node> function_closure_;
- SetOncePointer<Node> function_context_;
-
- // Result of loop assignment analysis performed before graph creation.
- LoopAssignmentAnalysis* loop_assignment_analysis_;
-
- CompilationInfo* info() const { return info_; }
- inline StrictMode strict_mode() const;
- JSGraph* jsgraph() { return jsgraph_; }
- JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
- ZoneVector<Handle<Object>>* globals() { return &globals_; }
+ // Helper to wrap a Handle<T> into a Unique<T>.
+ template <class T>
+ Unique<T> MakeUnique(Handle<T> object) {
+ return Unique<T>::CreateUninitialized(object);
+ }
- // Current scope during visitation.
- inline Scope* current_scope() const;
+ Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
@@ -168,6 +283,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
void VisitForEffect(Expression* expr);
void VisitForValue(Expression* expr);
void VisitForValueOrNull(Expression* expr);
+ void VisitForValueOrTheHole(Expression* expr);
void VisitForValues(ZoneList<Expression*>* exprs);
// Common for all IterationStatement bodies.
@@ -188,7 +304,12 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
void VisitArithmeticExpression(BinaryOperation* expr);
// Dispatched from VisitForInStatement.
- void VisitForInAssignment(Expression* expr, Node* value);
+ void VisitForInAssignment(Expression* expr, Node* value,
+ BailoutId bailout_id);
+ void VisitForInBody(ForInStatement* stmt);
+
+ // Dispatched from VisitClassLiteral.
+ void VisitClassLiteralContents(ClassLiteral* expr);
// Builds deoptimization for a given node.
void PrepareFrameState(
@@ -210,11 +331,9 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
//
// [parameters (+receiver)] [locals] [operand stack]
//
-class AstGraphBuilder::Environment
- : public StructuredGraphBuilder::Environment {
+class AstGraphBuilder::Environment : public ZoneObject {
public:
Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
- Environment(const Environment& copy);
int parameters_count() const { return parameters_count_; }
int locals_count() const { return locals_count_; }
@@ -244,6 +363,10 @@ class AstGraphBuilder::Environment
}
}
+ Node* Context() const { return contexts_.back(); }
+ void PushContext(Node* context) { contexts()->push_back(context); }
+ void PopContext() { contexts()->pop_back(); }
+
// Operations on the operand stack.
void Push(Node* node) {
values()->push_back(node);
@@ -279,170 +402,72 @@ class AstGraphBuilder::Environment
// further mutation of the environment will not affect checkpoints.
Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine);
- protected:
- AstGraphBuilder* builder() const {
- return reinterpret_cast<AstGraphBuilder*>(
- StructuredGraphBuilder::Environment::builder());
+ // Control dependency tracked by this environment.
+ Node* GetControlDependency() { return control_dependency_; }
+ void UpdateControlDependency(Node* dependency) {
+ control_dependency_ = dependency;
}
- private:
- void UpdateStateValues(Node** state_values, int offset, int count);
-
- int parameters_count_;
- int locals_count_;
- Node* parameters_node_;
- Node* locals_node_;
- Node* stack_node_;
-};
-
-
-// Each expression in the AST is evaluated in a specific context. This context
-// decides how the evaluation result is passed up the visitor.
-class AstGraphBuilder::AstContext BASE_EMBEDDED {
- public:
- bool IsEffect() const { return kind_ == Expression::kEffect; }
- bool IsValue() const { return kind_ == Expression::kValue; }
- bool IsTest() const { return kind_ == Expression::kTest; }
-
- // Determines how to combine the frame state with the value
- // that is about to be plugged into this AstContext.
- OutputFrameStateCombine GetStateCombine() {
- return IsEffect() ? OutputFrameStateCombine::Ignore()
- : OutputFrameStateCombine::Push();
+ // Effect dependency tracked by this environment.
+ Node* GetEffectDependency() { return effect_dependency_; }
+ void UpdateEffectDependency(Node* dependency) {
+ effect_dependency_ = dependency;
}
- // Plug a node into this expression context. Call this function in tail
- // position in the Visit functions for expressions.
- virtual void ProduceValue(Node* value) = 0;
-
- // Unplugs a node from this expression context. Call this to retrieve the
- // result of another Visit function that already plugged the context.
- virtual Node* ConsumeValue() = 0;
-
- // Shortcut for "context->ProduceValue(context->ConsumeValue())".
- void ReplaceValue() { ProduceValue(ConsumeValue()); }
-
- protected:
- AstContext(AstGraphBuilder* owner, Expression::Context kind);
- virtual ~AstContext();
-
- AstGraphBuilder* owner() const { return owner_; }
- Environment* environment() const { return owner_->environment(); }
-
-// We want to be able to assert, in a context-specific way, that the stack
-// height makes sense when the context is filled.
-#ifdef DEBUG
- int original_height_;
-#endif
-
- private:
- Expression::Context kind_;
- AstGraphBuilder* owner_;
- AstContext* outer_;
-};
-
-
-// Context to evaluate expression for its side effects only.
-class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
- public:
- explicit AstEffectContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kEffect) {}
- ~AstEffectContext() FINAL;
- void ProduceValue(Node* value) FINAL;
- Node* ConsumeValue() FINAL;
-};
-
-
-// Context to evaluate expression for its value (and side effects).
-class AstGraphBuilder::AstValueContext FINAL : public AstContext {
- public:
- explicit AstValueContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kValue) {}
- ~AstValueContext() FINAL;
- void ProduceValue(Node* value) FINAL;
- Node* ConsumeValue() FINAL;
-};
-
-
-// Context to evaluate expression for a condition value (and side effects).
-class AstGraphBuilder::AstTestContext FINAL : public AstContext {
- public:
- explicit AstTestContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kTest) {}
- ~AstTestContext() FINAL;
- void ProduceValue(Node* value) FINAL;
- Node* ConsumeValue() FINAL;
-};
-
-
-// Scoped class tracking breakable statements entered by the visitor. Allows to
-// properly 'break' and 'continue' iteration statements as well as to 'break'
-// from blocks within switch statements.
-class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
- public:
- BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
- ControlBuilder* control, int drop_extra)
- : owner_(owner),
- target_(target),
- next_(owner->breakable()),
- control_(control),
- drop_extra_(drop_extra) {
- owner_->set_breakable(this); // Push.
+ // Mark this environment as being unreachable.
+ void MarkAsUnreachable() {
+ UpdateControlDependency(builder()->jsgraph()->DeadControl());
}
-
- ~BreakableScope() {
- owner_->set_breakable(next_); // Pop.
+ bool IsMarkedAsUnreachable() {
+ return GetControlDependency()->opcode() == IrOpcode::kDead;
}
- // Either 'break' or 'continue' the target statement.
- void BreakTarget(BreakableStatement* target);
- void ContinueTarget(BreakableStatement* target);
-
- private:
- AstGraphBuilder* owner_;
- BreakableStatement* target_;
- BreakableScope* next_;
- ControlBuilder* control_;
- int drop_extra_;
-
- // Find the correct scope for the target statement. Note that this also drops
- // extra operands from the environment for each scope skipped along the way.
- BreakableScope* FindBreakable(BreakableStatement* target);
-};
+ // Merge another environment into this one.
+ void Merge(Environment* other);
+ // Copies this environment at a control-flow split point.
+ Environment* CopyForConditional() { return Copy(); }
-// Scoped class tracking context objects created by the visitor. Represents
-// mutations of the context chain within the function body and allows to
-// change the current {scope} and {context} during visitation.
-class AstGraphBuilder::ContextScope BASE_EMBEDDED {
- public:
- ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
- : owner_(owner),
- next_(owner->execution_context()),
- outer_(owner->current_context()),
- scope_(scope) {
- owner_->set_execution_context(this); // Push.
- owner_->set_current_context(context);
+ // Copies this environment to a potentially unreachable control-flow point.
+ Environment* CopyAsUnreachable() {
+ Environment* env = Copy();
+ env->MarkAsUnreachable();
+ return env;
}
- ~ContextScope() {
- owner_->set_execution_context(next_); // Pop.
- owner_->set_current_context(outer_);
+ // Copies this environment at a loop header control-flow point.
+ Environment* CopyForLoop(BitVector* assigned, bool is_osr = false) {
+ PrepareForLoop(assigned, is_osr);
+ return Copy();
}
- // Current scope during visitation.
- Scope* scope() const { return scope_; }
+ int ContextStackDepth() { return static_cast<int>(contexts_.size()); }
private:
- AstGraphBuilder* owner_;
- ContextScope* next_;
- Node* outer_;
- Scope* scope_;
-};
+ AstGraphBuilder* builder_;
+ int parameters_count_;
+ int locals_count_;
+ NodeVector values_;
+ NodeVector contexts_;
+ Node* control_dependency_;
+ Node* effect_dependency_;
+ Node* parameters_node_;
+ Node* locals_node_;
+ Node* stack_node_;
-Scope* AstGraphBuilder::current_scope() const {
- return execution_context_->scope();
-}
+ explicit Environment(const Environment* copy);
+ Environment* Copy() { return new (zone()) Environment(this); }
+ void UpdateStateValues(Node** state_values, int offset, int count);
+ Zone* zone() const { return builder_->local_zone(); }
+ Graph* graph() const { return builder_->graph(); }
+ AstGraphBuilder* builder() const { return builder_; }
+ CommonOperatorBuilder* common() { return builder_->common(); }
+ NodeVector* values() { return &values_; }
+ NodeVector* contexts() { return &contexts_; }
+
+ // Prepare environment to be used as loop header.
+ void PrepareForLoop(BitVector* assigned, bool is_osr = false);
+};
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 7adac56e76..c81d5483bf 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -13,7 +13,7 @@ typedef class AstLoopAssignmentAnalyzer ALAA; // for code shortitude.
ALAA::AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info)
: info_(info), loop_stack_(zone) {
- InitializeAstVisitor(zone);
+ InitializeAstVisitor(info->isolate(), zone);
}
@@ -30,6 +30,8 @@ void ALAA::Enter(IterationStatement* loop) {
int num_variables = 1 + info()->scope()->num_parameters() +
info()->scope()->num_stack_slots();
BitVector* bits = new (zone()) BitVector(num_variables, zone());
+ if (info()->is_osr() && info()->osr_ast_id() == loop->OsrEntryId())
+ bits->AddAll();
loop_stack_.push_back(bits);
}
@@ -55,7 +57,6 @@ void ALAA::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
void ALAA::VisitModuleDeclaration(ModuleDeclaration* leaf) {}
void ALAA::VisitImportDeclaration(ImportDeclaration* leaf) {}
void ALAA::VisitExportDeclaration(ExportDeclaration* leaf) {}
-void ALAA::VisitModuleVariable(ModuleVariable* leaf) {}
void ALAA::VisitModulePath(ModulePath* leaf) {}
void ALAA::VisitModuleUrl(ModuleUrl* leaf) {}
void ALAA::VisitEmptyStatement(EmptyStatement* leaf) {}
@@ -203,9 +204,8 @@ void ALAA::VisitCaseClause(CaseClause* cc) {
// -- Interesting nodes-------------------------------------------------------
// ---------------------------------------------------------------------------
void ALAA::VisitModuleStatement(ModuleStatement* stmt) {
- Visit(stmt->body());
// TODO(turbofan): can a module appear in a loop?
- AnalyzeAssignment(stmt->proxy()->var());
+ Visit(stmt->body());
}
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
index 00a7f2d79d..38eed5b635 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
@@ -7,7 +7,6 @@
#include "src/ast.h"
#include "src/bit-vector.h"
-#include "src/v8.h"
#include "src/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index d7d3ade1b2..54f9d6b059 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -10,6 +10,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
index 7edac0dbe8..353b0c2ba7 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.h
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
#define V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
-#include "src/v8.h"
-
#include "src/basic-block-profiler.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index 7ddc751ab5..0057b10f81 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -9,7 +9,8 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
namespace v8 {
namespace internal {
@@ -72,8 +73,9 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
// The AllocateHeapNumberStub does not use the context, so we can safely pass
// in Smi zero here.
Callable callable = CodeFactory::AllocateHeapNumber(isolate());
- CallDescriptor* descriptor = linkage()->GetStubCallDescriptor(
- callable.descriptor(), 0, CallDescriptor::kNoFlags);
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags);
Node* target = jsgraph()->HeapConstant(callable.code());
Node* context = jsgraph()->NoContextConstant();
Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
@@ -163,7 +165,7 @@ Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
machine()->Word64Shl(),
graph()->NewNode(machine()->ChangeInt32ToInt64(), value),
SmiShiftBitsConstant()));
- } else if (NodeProperties::GetBounds(value).upper->Is(Type::SignedSmall())) {
+ } else if (NodeProperties::GetBounds(value).upper->Is(Type::Signed31())) {
return Replace(
graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant()));
}
@@ -227,7 +229,12 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
d1.Chain(control);
Node* number =
- graph()->NewNode(value->op(), object, context, effect, d1.if_true);
+ OperatorProperties::HasFrameStateInput(value->op())
+ ? graph()->NewNode(value->op(), object, context,
+ NodeProperties::GetFrameStateInput(value),
+ effect, d1.if_true)
+ : graph()->NewNode(value->op(), object, context, effect,
+ d1.if_true);
Diamond d2(graph(), common(), TestNotSmi(number));
d2.Nest(d1, true);
Node* phi2 = d2.Phi(kMachFloat64, LoadHeapNumberValue(number, d2.if_true),
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
index 773fd0807b..40a3e152b5 100644
--- a/deps/v8/src/compiler/change-lowering.h
+++ b/deps/v8/src/compiler/change-lowering.h
@@ -19,8 +19,7 @@ class MachineOperatorBuilder;
class ChangeLowering FINAL : public Reducer {
public:
- ChangeLowering(JSGraph* jsgraph, Linkage* linkage)
- : jsgraph_(jsgraph), linkage_(linkage) {}
+ explicit ChangeLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
~ChangeLowering() FINAL;
Reduction Reduce(Node* node) FINAL;
@@ -52,12 +51,10 @@ class ChangeLowering FINAL : public Reducer {
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
- Linkage* linkage() const { return linkage_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
JSGraph* jsgraph_;
- Linkage* linkage_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 79423441c5..129f9409e3 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -144,6 +144,8 @@ class OutOfLineCode : public ZoneObject {
static inline void FinishCode(MacroAssembler* masm) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
masm->CheckConstPool(true, false);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+ masm->ud2();
#endif
}
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index cfe4f06606..1a4566d609 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -12,6 +12,24 @@ namespace v8 {
namespace internal {
namespace compiler {
+class CodeGenerator::JumpTable FINAL : public ZoneObject {
+ public:
+ JumpTable(JumpTable* next, Label** targets, size_t target_count)
+ : next_(next), targets_(targets), target_count_(target_count) {}
+
+ Label* label() { return &label_; }
+ JumpTable* next() const { return next_; }
+ Label** targets() const { return targets_; }
+ size_t target_count() const { return target_count_; }
+
+ private:
+ Label label_;
+ JumpTable* const next_;
+ Label** const targets_;
+ size_t const target_count_;
+};
+
+
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
: frame_(frame),
@@ -21,14 +39,16 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
current_block_(BasicBlock::RpoNumber::Invalid()),
current_source_position_(SourcePosition::Invalid()),
- masm_(code->zone()->isolate(), NULL, 0),
+ masm_(info->isolate(), NULL, 0),
resolver_(this),
safepoints_(code->zone()),
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
translations_(code->zone()),
last_lazy_deopt_pc_(0),
- ools_(nullptr) {
+ jump_tables_(nullptr),
+ ools_(nullptr),
+ osr_pc_offset_(-1) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -80,12 +100,10 @@ Handle<Code> CodeGenerator::GenerateCode() {
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
masm()->bind(ool->entry());
ool->Generate();
- masm()->jmp(ool->exit());
+ if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
}
}
- FinishCode(masm());
-
// Ensure there is space for lazy deoptimization in the code.
if (!info->IsStub()) {
int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
@@ -94,15 +112,21 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
- safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+ FinishCode(masm());
- // TODO(titzer): what are the right code flags here?
- Code::Kind kind = Code::STUB;
- if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- kind = Code::OPTIMIZED_FUNCTION;
+ // Emit the jump tables.
+ if (jump_tables_) {
+ masm()->Align(kPointerSize);
+ for (JumpTable* table = jump_tables_; table; table = table->next()) {
+ masm()->bind(table->label());
+ AssembleJumpTable(table->targets(), table->target_count());
+ }
}
+
+ safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+
Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- masm(), Code::ComputeFlags(kind), info);
+ masm(), info->flags(), info);
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetSpillSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
@@ -236,7 +260,7 @@ void CodeGenerator::AssembleGap(GapInstruction* instr) {
void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
CompilationInfo* info = this->info();
int deopt_count = static_cast<int>(deoptimization_states_.size());
- if (deopt_count == 0) return;
+ if (deopt_count == 0 && !info->is_osr()) return;
Handle<DeoptimizationInputData> data =
DeoptimizationInputData::New(isolate(), deopt_count, TENURED);
@@ -266,16 +290,21 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
data->SetLiteralArray(*literals);
}
- // No OSR in Turbofan yet...
- BailoutId osr_ast_id = BailoutId::None();
- data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(-1));
+ if (info->is_osr()) {
+ DCHECK(osr_pc_offset_ >= 0);
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+ } else {
+ BailoutId osr_ast_id = BailoutId::None();
+ data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(-1));
+ }
// Populate deoptimization entries.
for (int i = 0; i < deopt_count; i++) {
DeoptimizationState* deoptimization_state = deoptimization_states_[i];
data->SetAstId(i, deoptimization_state->bailout_id());
- CHECK_NE(NULL, deoptimization_states_[i]);
+ CHECK(deoptimization_states_[i]);
data->SetTranslationIndex(
i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
data->SetArgumentsStackHeight(i, Smi::FromInt(0));
@@ -286,6 +315,12 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
}
+Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
+ jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
+ return jump_tables_->label();
+}
+
+
void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
@@ -571,6 +606,11 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ UNIMPLEMENTED();
+}
+
#endif // !V8_TURBOFAN_BACKEND
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 747bad2e22..658394b321 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -38,7 +38,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
InstructionSequence* code() const { return code_; }
Frame* frame() const { return frame_; }
- Isolate* isolate() const { return zone()->isolate(); }
+ Isolate* isolate() const { return info_->isolate(); }
Linkage* linkage() const { return linkage_; }
Label* GetLabel(BasicBlock::RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
@@ -71,6 +71,8 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
void AssembleArchJump(BasicBlock::RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+ void AssembleArchLookupSwitch(Instruction* instr);
+ void AssembleArchTableSwitch(Instruction* instr);
void AssembleDeoptimizerCall(int deoptimization_id);
@@ -92,6 +94,18 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
InstructionOperand* destination) FINAL;
// ===========================================================================
+ // =================== Jump table construction methods. ======================
+ // ===========================================================================
+
+ class JumpTable;
+ // Adds a jump table that is emitted after the actual code. Returns label
+ // pointing to the beginning of the table. {targets} is assumed to be static
+ // or zone allocated.
+ Label* AddJumpTable(Label** targets, size_t target_count);
+ // Emits a jump table.
+ void AssembleJumpTable(Label** targets, size_t target_count);
+
+ // ===========================================================================
// Deoptimization table construction
void AddSafepointAndDeopt(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
@@ -145,7 +159,9 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
+ JumpTable* jump_tables_;
OutOfLineCode* ools_;
+ int osr_pc_offset_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index cf597ea998..c3cbcdefc7 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index a6cca456d2..8aea3df5ad 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -103,13 +103,23 @@ std::ostream& operator<<(std::ostream& os, FrameStateCallInfo const& info) {
}
-#define CACHED_OP_LIST(V) \
- V(Dead, Operator::kFoldable, 0, 0, 0, 1) \
- V(End, Operator::kFoldable, 0, 0, 1, 0) \
- V(IfTrue, Operator::kFoldable, 0, 0, 1, 1) \
- V(IfFalse, Operator::kFoldable, 0, 0, 1, 1) \
- V(Throw, Operator::kFoldable, 1, 1, 1, 1) \
- V(Return, Operator::kNoProperties, 1, 1, 1, 1)
+size_t ProjectionIndexOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kProjection, op->opcode());
+ return OpParameter<size_t>(op);
+}
+
+
+#define CACHED_OP_LIST(V) \
+ V(Always, Operator::kPure, 0, 0, 0, 1, 0, 0) \
+ V(Dead, Operator::kFoldable, 0, 0, 0, 0, 0, 1) \
+ V(End, Operator::kKontrol, 0, 0, 1, 0, 0, 0) \
+ V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(Throw, Operator::kFoldable, 1, 1, 1, 0, 0, 1) \
+ V(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
+ V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)
#define CACHED_LOOP_LIST(V) \
@@ -139,14 +149,16 @@ std::ostream& operator<<(std::ostream& os, FrameStateCallInfo const& info) {
struct CommonOperatorGlobalCache FINAL {
-#define CACHED(Name, properties, value_input_count, effect_input_count, \
- control_input_count, control_output_count) \
- struct Name##Operator FINAL : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::k##Name, properties, #Name, value_input_count, \
- effect_input_count, control_input_count, 0, 0, \
- control_output_count) {} \
- }; \
+#define CACHED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count) \
+ struct Name##Operator FINAL : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, properties, #Name, value_input_count, \
+ effect_input_count, control_input_count, \
+ value_output_count, effect_output_count, \
+ control_output_count) {} \
+ }; \
Name##Operator k##Name##Operator;
CACHED_OP_LIST(CACHED)
#undef CACHED
@@ -154,11 +166,11 @@ struct CommonOperatorGlobalCache FINAL {
template <BranchHint kBranchHint>
struct BranchOperator FINAL : public Operator1<BranchHint> {
BranchOperator()
- : Operator1<BranchHint>( // --
- IrOpcode::kBranch, Operator::kFoldable, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- kBranchHint) {} // parameter
+ : Operator1<BranchHint>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ kBranchHint) {} // parameter
};
BranchOperator<BranchHint::kNone> kBranchNoneOperator;
BranchOperator<BranchHint::kTrue> kBranchTrueOperator;
@@ -167,10 +179,10 @@ struct CommonOperatorGlobalCache FINAL {
template <size_t kInputCount>
struct LoopOperator FINAL : public Operator {
LoopOperator()
- : Operator( // --
- IrOpcode::kLoop, Operator::kFoldable, // opcode
- "Loop", // name
- 0, 0, kInputCount, 0, 0, 1) {} // counts
+ : Operator( // --
+ IrOpcode::kLoop, Operator::kKontrol, // opcode
+ "Loop", // name
+ 0, 0, kInputCount, 0, 0, 1) {} // counts
};
#define CACHED_LOOP(input_count) \
LoopOperator<input_count> kLoop##input_count##Operator;
@@ -180,10 +192,10 @@ struct CommonOperatorGlobalCache FINAL {
template <size_t kInputCount>
struct MergeOperator FINAL : public Operator {
MergeOperator()
- : Operator( // --
- IrOpcode::kMerge, Operator::kFoldable, // opcode
- "Merge", // name
- 0, 0, kInputCount, 0, 0, 1) {} // counts
+ : Operator( // --
+ IrOpcode::kMerge, Operator::kKontrol, // opcode
+ "Merge", // name
+ 0, 0, kInputCount, 0, 0, 1) {} // counts
};
#define CACHED_MERGE(input_count) \
MergeOperator<input_count> kMerge##input_count##Operator;
@@ -214,10 +226,11 @@ CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
: cache_(kCache.Get()), zone_(zone) {}
-#define CACHED(Name, properties, value_input_count, effect_input_count, \
- control_input_count, control_output_count) \
- const Operator* CommonOperatorBuilder::Name() { \
- return &cache_.k##Name##Operator; \
+#define CACHED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count) \
+ const Operator* CommonOperatorBuilder::Name() { \
+ return &cache_.k##Name##Operator; \
}
CACHED_OP_LIST(CACHED)
#undef CACHED
@@ -237,6 +250,24 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
}
+const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
+ DCHECK_GE(control_output_count, 3u); // Disallow trivial switches.
+ return new (zone()) Operator( // --
+ IrOpcode::kSwitch, Operator::kKontrol, // opcode
+ "Switch", // name
+ 1, 0, 1, 0, 0, control_output_count); // counts
+}
+
+
+const Operator* CommonOperatorBuilder::IfValue(int32_t index) {
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kIfValue, Operator::kKontrol, // opcode
+ "IfValue", // name
+ 0, 0, 1, 0, 0, 1, // counts
+ index); // parameter
+}
+
+
const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
// Outputs are formal parameters, plus context, receiver, and JSFunction.
const int value_output_count = num_formal_parameters + 3;
@@ -258,10 +289,10 @@ const Operator* CommonOperatorBuilder::Loop(int control_input_count) {
break;
}
// Uncached.
- return new (zone()) Operator( // --
- IrOpcode::kLoop, Operator::kFoldable, // opcode
- "Loop", // name
- 0, 0, control_input_count, 0, 0, 1); // counts
+ return new (zone()) Operator( // --
+ IrOpcode::kLoop, Operator::kKontrol, // opcode
+ "Loop", // name
+ 0, 0, control_input_count, 0, 0, 1); // counts
}
@@ -276,18 +307,10 @@ const Operator* CommonOperatorBuilder::Merge(int control_input_count) {
break;
}
// Uncached.
- return new (zone()) Operator( // --
- IrOpcode::kMerge, Operator::kFoldable, // opcode
- "Merge", // name
- 0, 0, control_input_count, 0, 0, 1); // counts
-}
-
-
-const Operator* CommonOperatorBuilder::Terminate(int effects) {
- return new (zone()) Operator( // --
- IrOpcode::kTerminate, Operator::kPure, // opcode
- "Terminate", // name
- 0, effects, 1, 0, 0, 1); // counts
+ return new (zone()) Operator( // --
+ IrOpcode::kMerge, Operator::kKontrol, // opcode
+ "Merge", // name
+ 0, 0, control_input_count, 0, 0, 1); // counts
}
@@ -310,6 +333,15 @@ const Operator* CommonOperatorBuilder::Parameter(int index) {
}
+const Operator* CommonOperatorBuilder::OsrValue(int index) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kOsrValue, Operator::kNoProperties, // opcode
+ "OsrValue", // name
+ 0, 0, 1, 1, 0, 0, // counts
+ index); // parameter
+}
+
+
const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
return new (zone()) Operator1<int32_t>( // --
IrOpcode::kInt32Constant, Operator::kPure, // opcode
@@ -407,6 +439,15 @@ const Operator* CommonOperatorBuilder::EffectPhi(int arguments) {
}
+const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
+ DCHECK(arguments > 1); // Disallow empty/singleton sets.
+ return new (zone()) Operator( // --
+ IrOpcode::kEffectSet, Operator::kPure, // opcode
+ "EffectSet", // name
+ 0, arguments, 0, 0, 1, 0); // counts
+}
+
+
const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
DCHECK(arguments > 0); // Disallow empty value effects.
return new (zone()) Operator( // --
@@ -465,13 +506,32 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
const Operator* CommonOperatorBuilder::Projection(size_t index) {
- return new (zone()) Operator1<size_t>( // --
- IrOpcode::kProjection, Operator::kPure, // opcode
- "Projection", // name
- 1, 0, 0, 1, 0, 0, // counts
- index); // parameter
+ return new (zone()) Operator1<size_t>( // --
+ IrOpcode::kProjection, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // flags
+ "Projection", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ index); // parameter
+}
+
+
+const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
+ int size) {
+ if (op->opcode() == IrOpcode::kPhi) {
+ return Phi(OpParameter<MachineType>(op), size);
+ } else if (op->opcode() == IrOpcode::kEffectPhi) {
+ return EffectPhi(size);
+ } else if (op->opcode() == IrOpcode::kMerge) {
+ return Merge(size);
+ } else if (op->opcode() == IrOpcode::kLoop) {
+ return Loop(size);
+ } else {
+ UNREACHABLE();
+ return nullptr;
+ }
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index af6066b133..9f2c575163 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -153,19 +153,29 @@ size_t hash_value(FrameStateCallInfo const&);
std::ostream& operator<<(std::ostream&, FrameStateCallInfo const&);
+size_t ProjectionIndexOf(const Operator* const);
+
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class CommonOperatorBuilder FINAL : public ZoneObject {
public:
explicit CommonOperatorBuilder(Zone* zone);
+ // Special operator used only in Branches to mark them as always taken, but
+ // still unfoldable. This is required to properly connect non terminating
+ // loops to end (in both the sea of nodes and the CFG).
+ const Operator* Always();
+
const Operator* Dead();
const Operator* End();
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
+ const Operator* Switch(size_t control_output_count);
+ const Operator* IfValue(int32_t value);
+ const Operator* IfDefault();
const Operator* Throw();
- const Operator* Terminate(int effects);
const Operator* Return();
const Operator* Start(int num_formal_parameters);
@@ -173,6 +183,10 @@ class CommonOperatorBuilder FINAL : public ZoneObject {
const Operator* Merge(int control_input_count);
const Operator* Parameter(int index);
+ const Operator* OsrNormalEntry();
+ const Operator* OsrLoopEntry();
+ const Operator* OsrValue(int index);
+
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
const Operator* Float32Constant(volatile float);
@@ -184,6 +198,7 @@ class CommonOperatorBuilder FINAL : public ZoneObject {
const Operator* Select(MachineType, BranchHint = BranchHint::kNone);
const Operator* Phi(MachineType type, int arguments);
const Operator* EffectPhi(int arguments);
+ const Operator* EffectSet(int arguments);
const Operator* ValueEffect(int arguments);
const Operator* Finish(int arguments);
const Operator* StateValues(int arguments);
@@ -194,6 +209,10 @@ class CommonOperatorBuilder FINAL : public ZoneObject {
const Operator* Call(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
+ // Constructs a new merge or phi operator with the same opcode as {op}, but
+ // with {size} inputs.
+ const Operator* ResizeMergeOrPhi(const Operator* op, int size);
+
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index 8725244ebd..2ace441e61 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -32,9 +32,8 @@ void IfBuilder::End() {
}
-void LoopBuilder::BeginLoop(BitVector* assigned) {
- builder_->NewLoop();
- loop_environment_ = environment()->CopyForLoop(assigned);
+void LoopBuilder::BeginLoop(BitVector* assigned, bool is_osr) {
+ loop_environment_ = environment()->CopyForLoop(assigned, is_osr);
continue_environment_ = environment()->CopyAsUnreachable();
break_environment_ = environment()->CopyAsUnreachable();
}
@@ -74,6 +73,16 @@ void LoopBuilder::BreakUnless(Node* condition) {
}
+void LoopBuilder::BreakWhen(Node* condition) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition);
+ control_if.Then();
+ Break();
+ control_if.Else();
+ control_if.End();
+}
+
+
void SwitchBuilder::BeginSwitch() {
body_environment_ = environment()->CopyAsUnreachable();
label_environment_ = environment()->CopyAsUnreachable();
@@ -138,6 +147,61 @@ void BlockBuilder::EndBlock() {
break_environment_->Merge(environment());
set_environment(break_environment_);
}
+
+
+void TryCatchBuilder::BeginTry() {
+ catch_environment_ = environment()->CopyAsUnreachable();
+ catch_environment_->Push(the_hole());
+}
+
+
+void TryCatchBuilder::Throw(Node* exception) {
+ environment()->Push(exception);
+ catch_environment_->Merge(environment());
+ environment()->Pop();
+ environment()->MarkAsUnreachable();
}
+
+
+void TryCatchBuilder::EndTry() {
+ exit_environment_ = environment();
+ exception_node_ = catch_environment_->Pop();
+ set_environment(catch_environment_);
}
-} // namespace v8::internal::compiler
+
+
+void TryCatchBuilder::EndCatch() {
+ exit_environment_->Merge(environment());
+ set_environment(exit_environment_);
+}
+
+
+void TryFinallyBuilder::BeginTry() {
+ finally_environment_ = environment()->CopyAsUnreachable();
+ finally_environment_->Push(the_hole());
+}
+
+
+void TryFinallyBuilder::LeaveTry(Node* token) {
+ environment()->Push(token);
+ finally_environment_->Merge(environment());
+ environment()->Pop();
+}
+
+
+void TryFinallyBuilder::EndTry(Node* fallthrough_token) {
+ environment()->Push(fallthrough_token);
+ finally_environment_->Merge(environment());
+ environment()->Pop();
+ token_node_ = finally_environment_->Pop();
+ set_environment(finally_environment_);
+}
+
+
+void TryFinallyBuilder::EndFinally() {
+ // Nothing to be done here.
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index 11adfdb0f4..c22ee04a98 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -5,9 +5,7 @@
#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
#define V8_COMPILER_CONTROL_BUILDERS_H_
-#include "src/v8.h"
-
-#include "src/compiler/graph-builder.h"
+#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/node.h"
namespace v8 {
@@ -15,25 +13,24 @@ namespace internal {
namespace compiler {
// Base class for all control builders. Also provides a common interface for
-// control builders to handle 'break' and 'continue' statements when they are
-// used to model breakable statements.
+// control builders to handle 'break' statements when they are used to model
+// breakable statements.
class ControlBuilder {
public:
- explicit ControlBuilder(StructuredGraphBuilder* builder)
- : builder_(builder) {}
+ explicit ControlBuilder(AstGraphBuilder* builder) : builder_(builder) {}
virtual ~ControlBuilder() {}
- // Interface for break and continue.
+ // Interface for break.
virtual void Break() { UNREACHABLE(); }
- virtual void Continue() { UNREACHABLE(); }
protected:
- typedef StructuredGraphBuilder Builder;
- typedef StructuredGraphBuilder::Environment Environment;
+ typedef AstGraphBuilder Builder;
+ typedef AstGraphBuilder::Environment Environment;
Zone* zone() const { return builder_->local_zone(); }
Environment* environment() { return builder_->environment(); }
void set_environment(Environment* env) { builder_->set_environment(env); }
+ Node* the_hole() const { return builder_->jsgraph()->TheHoleConstant(); }
Builder* builder_;
};
@@ -42,7 +39,7 @@ class ControlBuilder {
// Tracks control flow for a conditional statement.
class IfBuilder FINAL : public ControlBuilder {
public:
- explicit IfBuilder(StructuredGraphBuilder* builder)
+ explicit IfBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
then_environment_(NULL),
else_environment_(NULL) {}
@@ -62,23 +59,24 @@ class IfBuilder FINAL : public ControlBuilder {
// Tracks control flow for an iteration statement.
class LoopBuilder FINAL : public ControlBuilder {
public:
- explicit LoopBuilder(StructuredGraphBuilder* builder)
+ explicit LoopBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
loop_environment_(NULL),
continue_environment_(NULL),
break_environment_(NULL) {}
// Primitive control commands.
- void BeginLoop(BitVector* assigned);
+ void BeginLoop(BitVector* assigned, bool is_osr = false);
+ void Continue();
void EndBody();
void EndLoop();
- // Primitive support for break and continue.
- void Continue() FINAL;
+ // Primitive support for break.
void Break() FINAL;
- // Compound control command for conditional break.
+ // Compound control commands for conditional break.
void BreakUnless(Node* condition);
+ void BreakWhen(Node* condition);
private:
Environment* loop_environment_; // Environment of the loop header.
@@ -90,7 +88,7 @@ class LoopBuilder FINAL : public ControlBuilder {
// Tracks control flow for a switch statement.
class SwitchBuilder FINAL : public ControlBuilder {
public:
- explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
+ explicit SwitchBuilder(AstGraphBuilder* builder, int case_count)
: ControlBuilder(builder),
body_environment_(NULL),
label_environment_(NULL),
@@ -123,7 +121,7 @@ class SwitchBuilder FINAL : public ControlBuilder {
// Tracks control flow for a block statement.
class BlockBuilder FINAL : public ControlBuilder {
public:
- explicit BlockBuilder(StructuredGraphBuilder* builder)
+ explicit BlockBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder), break_environment_(NULL) {}
// Primitive control commands.
@@ -137,6 +135,54 @@ class BlockBuilder FINAL : public ControlBuilder {
Environment* break_environment_; // Environment after the block exits.
};
+
+// Tracks control flow for a try-catch statement.
+class TryCatchBuilder FINAL : public ControlBuilder {
+ public:
+ explicit TryCatchBuilder(AstGraphBuilder* builder)
+ : ControlBuilder(builder),
+ catch_environment_(NULL),
+ exit_environment_(NULL),
+ exception_node_(NULL) {}
+
+ // Primitive control commands.
+ void BeginTry();
+ void Throw(Node* exception);
+ void EndTry();
+ void EndCatch();
+
+ // Returns the exception value inside the 'catch' body.
+ Node* GetExceptionNode() const { return exception_node_; }
+
+ private:
+ Environment* catch_environment_; // Environment for the 'catch' body.
+ Environment* exit_environment_; // Environment after the statement.
+ Node* exception_node_; // Node for exception in 'catch' body.
+};
+
+
+// Tracks control flow for a try-finally statement.
+class TryFinallyBuilder FINAL : public ControlBuilder {
+ public:
+ explicit TryFinallyBuilder(AstGraphBuilder* builder)
+ : ControlBuilder(builder),
+ finally_environment_(NULL),
+ token_node_(NULL) {}
+
+ // Primitive control commands.
+ void BeginTry();
+ void LeaveTry(Node* token);
+ void EndTry(Node* token);
+ void EndFinally();
+
+ // Returns the dispatch token value inside the 'finally' body.
+ Node* GetDispatchTokenNode() const { return token_node_; }
+
+ private:
+ Environment* finally_environment_; // Environment for the 'finally' body.
+ Node* token_node_; // Node for token in 'finally' body.
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/control-equivalence.h b/deps/v8/src/compiler/control-equivalence.h
index cca087fe83..db05e3e73d 100644
--- a/deps/v8/src/compiler/control-equivalence.h
+++ b/deps/v8/src/compiler/control-equivalence.h
@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_CONTROL_EQUIVALENCE_H_
#define V8_COMPILER_CONTROL_EQUIVALENCE_H_
-#include "src/v8.h"
-
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
@@ -185,8 +183,7 @@ class ControlEquivalence : public ZoneObject {
Edge edge = *entry.input;
Node* input = edge.to();
++(entry.input);
- if (NodeProperties::IsControlEdge(edge) &&
- NodeProperties::IsControl(input)) {
+ if (NodeProperties::IsControlEdge(edge)) {
// Visit next control input.
if (!GetData(input)->participates) continue;
if (GetData(input)->visited) continue;
@@ -216,8 +213,7 @@ class ControlEquivalence : public ZoneObject {
Edge edge = *entry.use;
Node* use = edge.from();
++(entry.use);
- if (NodeProperties::IsControlEdge(edge) &&
- NodeProperties::IsControl(use)) {
+ if (NodeProperties::IsControlEdge(edge)) {
// Visit next control use.
if (!GetData(use)->participates) continue;
if (GetData(use)->visited) continue;
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
new file mode 100644
index 0000000000..1a2b4cdfd8
--- /dev/null
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -0,0 +1,142 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-flow-optimizer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ControlFlowOptimizer::ControlFlowOptimizer(JSGraph* jsgraph, Zone* zone)
+ : jsgraph_(jsgraph),
+ queue_(zone),
+ queued_(jsgraph->graph(), 2),
+ zone_(zone) {}
+
+
+void ControlFlowOptimizer::Optimize() {
+ Enqueue(graph()->start());
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ queue_.pop();
+ if (node->IsDead()) continue;
+ switch (node->opcode()) {
+ case IrOpcode::kBranch:
+ VisitBranch(node);
+ break;
+ default:
+ VisitNode(node);
+ break;
+ }
+ }
+}
+
+
+void ControlFlowOptimizer::Enqueue(Node* node) {
+ DCHECK_NOT_NULL(node);
+ if (node->IsDead() || queued_.Get(node)) return;
+ queued_.Set(node, true);
+ queue_.push(node);
+}
+
+
+void ControlFlowOptimizer::VisitNode(Node* node) {
+ for (Node* use : node->uses()) {
+ if (NodeProperties::IsControl(use)) Enqueue(use);
+ }
+}
+
+
+void ControlFlowOptimizer::VisitBranch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+
+ Node* branch = node;
+ Node* cond = NodeProperties::GetValueInput(branch, 0);
+ if (cond->opcode() != IrOpcode::kWord32Equal) return VisitNode(node);
+ Int32BinopMatcher m(cond);
+ Node* index = m.left().node();
+ if (!m.right().HasValue()) return VisitNode(node);
+ int32_t value = m.right().Value();
+ ZoneSet<int32_t> values(zone());
+ values.insert(value);
+
+ Node* if_false;
+ Node* if_true;
+ while (true) {
+ Node* control_projections[2];
+ NodeProperties::CollectControlProjections(branch, control_projections, 2);
+ if_true = control_projections[0];
+ if_false = control_projections[1];
+ DCHECK_EQ(IrOpcode::kIfTrue, if_true->opcode());
+ DCHECK_EQ(IrOpcode::kIfFalse, if_false->opcode());
+
+ auto it = if_false->uses().begin();
+ if (it == if_false->uses().end()) break;
+ Node* branch1 = *it++;
+ if (branch1->opcode() != IrOpcode::kBranch) break;
+ if (it != if_false->uses().end()) break;
+ Node* cond1 = branch1->InputAt(0);
+ if (cond1->opcode() != IrOpcode::kWord32Equal) break;
+ Int32BinopMatcher m1(cond1);
+ if (m1.left().node() != index) break;
+ if (!m1.right().HasValue()) break;
+ int32_t value1 = m1.right().Value();
+ if (values.find(value1) != values.end()) break;
+ DCHECK_NE(value, value1);
+
+ if (branch != node) {
+ branch->RemoveAllInputs();
+ if_true->ReplaceInput(0, node);
+ }
+ if_true->set_op(common()->IfValue(value));
+ if_false->RemoveAllInputs();
+ Enqueue(if_true);
+
+ branch = branch1;
+ value = value1;
+ values.insert(value);
+ }
+
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ DCHECK_EQ(IrOpcode::kIfTrue, if_true->opcode());
+ DCHECK_EQ(IrOpcode::kIfFalse, if_false->opcode());
+ if (branch == node) {
+ DCHECK_EQ(1u, values.size());
+ Enqueue(if_true);
+ Enqueue(if_false);
+ } else {
+ DCHECK_LT(1u, values.size());
+ node->set_op(common()->Switch(values.size() + 1));
+ node->ReplaceInput(0, index);
+ if_true->set_op(common()->IfValue(value));
+ if_true->ReplaceInput(0, node);
+ Enqueue(if_true);
+ if_false->set_op(common()->IfDefault());
+ if_false->ReplaceInput(0, node);
+ Enqueue(if_false);
+ branch->RemoveAllInputs();
+ }
+}
+
+
+CommonOperatorBuilder* ControlFlowOptimizer::common() const {
+ return jsgraph()->common();
+}
+
+
+Graph* ControlFlowOptimizer::graph() const { return jsgraph()->graph(); }
+
+
+MachineOperatorBuilder* ControlFlowOptimizer::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
new file mode 100644
index 0000000000..fb96e01734
--- /dev/null
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -0,0 +1,52 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
+#define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
+
+#include "src/compiler/node-marker.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+
+
+class ControlFlowOptimizer FINAL {
+ public:
+ ControlFlowOptimizer(JSGraph* jsgraph, Zone* zone);
+
+ void Optimize();
+
+ private:
+ void Enqueue(Node* node);
+ void VisitNode(Node* node);
+ void VisitBranch(Node* node);
+
+ CommonOperatorBuilder* common() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ MachineOperatorBuilder* machine() const;
+ Zone* zone() const { return zone_; }
+
+ JSGraph* const jsgraph_;
+ ZoneQueue<Node*> queue_;
+ NodeMarker<bool> queued_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
diff --git a/deps/v8/src/compiler/control-reducer.cc b/deps/v8/src/compiler/control-reducer.cc
index eef8a49fb1..d20c8dd806 100644
--- a/deps/v8/src/compiler/control-reducer.cc
+++ b/deps/v8/src/compiler/control-reducer.cc
@@ -6,8 +6,9 @@
#include "src/compiler/control-reducer.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/node-marker.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -53,8 +54,7 @@ class ControlReducerImpl {
common_(common),
state_(jsgraph->graph()->NodeCount(), kUnvisited, zone_),
stack_(zone_),
- revisit_(zone_),
- dead_(NULL) {}
+ revisit_(zone_) {}
Zone* zone_;
JSGraph* jsgraph_;
@@ -62,7 +62,6 @@ class ControlReducerImpl {
ZoneVector<VisitState> state_;
ZoneDeque<Node*> stack_;
ZoneDeque<Node*> revisit_;
- Node* dead_;
void Reduce() {
Push(graph()->end());
@@ -105,8 +104,9 @@ class ControlReducerImpl {
marked.Push(start);
marked.SetReachableFromStart(start);
- // We use a stack of (Node, UseIter) pairs to avoid O(n^2) traversal.
- typedef std::pair<Node*, UseIter> FwIter;
+ // We use a stack of (Node, Node::Uses::const_iterator) pairs to avoid
+ // O(n^2) traversal.
+ typedef std::pair<Node*, Node::Uses::const_iterator> FwIter;
ZoneVector<FwIter> fw_stack(zone_);
fw_stack.push_back(FwIter(start, start->uses().begin()));
@@ -131,7 +131,7 @@ class ControlReducerImpl {
pop = false; // restart traversing successors of this node.
break;
}
- if (IrOpcode::IsControlOpcode(succ->opcode()) &&
+ if (NodeProperties::IsControl(succ) &&
!marked.IsReachableFromStart(succ)) {
// {succ} is a control node and not yet reached from start.
marked.Push(succ);
@@ -155,7 +155,7 @@ class ControlReducerImpl {
// Any control nodes not reachable from start are dead, even loops.
for (size_t i = 0; i < nodes.size(); i++) {
Node* node = nodes[i];
- if (IrOpcode::IsControlOpcode(node->opcode()) &&
+ if (NodeProperties::IsControl(node) &&
!marked.IsReachableFromStart(node)) {
ReplaceNode(node, dead()); // uses will be added to revisit queue.
}
@@ -167,44 +167,80 @@ class ControlReducerImpl {
Node* ConnectNTL(Node* loop) {
TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));
- if (loop->opcode() != IrOpcode::kTerminate) {
- // Insert a {Terminate} node if the loop has effects.
- ZoneDeque<Node*> effects(zone_);
- for (Node* const use : loop->uses()) {
- if (use->opcode() == IrOpcode::kEffectPhi) effects.push_back(use);
- }
- int count = static_cast<int>(effects.size());
- if (count > 0) {
- Node** inputs = zone_->NewArray<Node*>(1 + count);
- for (int i = 0; i < count; i++) inputs[i] = effects[i];
- inputs[count] = loop;
- loop = graph()->NewNode(common_->Terminate(count), 1 + count, inputs);
- TRACE(("AddTerminate: #%d:%s[%d]\n", loop->id(), loop->op()->mnemonic(),
- count));
+ Node* always = graph()->NewNode(common_->Always());
+ // Mark the node as visited so that we can revisit later.
+ MarkAsVisited(always);
+
+ Node* branch = graph()->NewNode(common_->Branch(), always, loop);
+ // Mark the node as visited so that we can revisit later.
+ MarkAsVisited(branch);
+
+ Node* if_true = graph()->NewNode(common_->IfTrue(), branch);
+ // Mark the node as visited so that we can revisit later.
+ MarkAsVisited(if_true);
+
+ Node* if_false = graph()->NewNode(common_->IfFalse(), branch);
+ // Mark the node as visited so that we can revisit later.
+ MarkAsVisited(if_false);
+
+ // Hook up the branch into the loop and collect all loop effects.
+ NodeVector effects(zone_);
+ for (auto edge : loop->use_edges()) {
+ DCHECK_EQ(loop, edge.to());
+ DCHECK(NodeProperties::IsControlEdge(edge));
+ if (edge.from() == branch) continue;
+ switch (edge.from()->opcode()) {
+#define CASE(Opcode) case IrOpcode::k##Opcode:
+ CONTROL_OP_LIST(CASE)
+#undef CASE
+ // Update all control nodes (except {branch}) pointing to the {loop}.
+ edge.UpdateTo(if_true);
+ break;
+ case IrOpcode::kEffectPhi:
+ effects.push_back(edge.from());
+ break;
+ default:
+ break;
}
}
- Node* to_add = loop;
+ // Compute effects for the Return.
+ Node* effect = graph()->start();
+ int const effects_count = static_cast<int>(effects.size());
+ if (effects_count == 1) {
+ effect = effects[0];
+ } else if (effects_count > 1) {
+ effect = graph()->NewNode(common_->EffectSet(effects_count),
+ effects_count, &effects.front());
+ // Mark the node as visited so that we can revisit later.
+ MarkAsVisited(effect);
+ }
+
+ // Add a return to connect the NTL to the end.
+ Node* ret = graph()->NewNode(
+ common_->Return(), jsgraph_->UndefinedConstant(), effect, if_false);
+ // Mark the node as visited so that we can revisit later.
+ MarkAsVisited(ret);
+
Node* end = graph()->end();
CHECK_EQ(IrOpcode::kEnd, end->opcode());
Node* merge = end->InputAt(0);
if (merge == NULL || merge->opcode() == IrOpcode::kDead) {
- // The end node died; just connect end to {loop}.
- end->ReplaceInput(0, loop);
+ // The end node died; just connect end to {ret}.
+ end->ReplaceInput(0, ret);
} else if (merge->opcode() != IrOpcode::kMerge) {
- // Introduce a final merge node for {end->InputAt(0)} and {loop}.
- merge = graph()->NewNode(common_->Merge(2), merge, loop);
+ // Introduce a final merge node for {end->InputAt(0)} and {ret}.
+ merge = graph()->NewNode(common_->Merge(2), merge, ret);
end->ReplaceInput(0, merge);
- to_add = merge;
+ ret = merge;
// Mark the node as visited so that we can revisit later.
- EnsureStateSize(merge->id());
- state_[merge->id()] = kVisited;
+ MarkAsVisited(merge);
} else {
// Append a new input to the final merge at the end.
- merge->AppendInput(graph()->zone(), loop);
+ merge->AppendInput(graph()->zone(), ret);
merge->set_op(common_->Merge(merge->InputCount()));
}
- return to_add;
+ return ret;
}
void AddNodesReachableFromEnd(ReachabilityMarker& marked, NodeVector& nodes) {
@@ -255,12 +291,23 @@ class ControlReducerImpl {
}
#if DEBUG
// Verify that no inputs to live nodes are NULL.
- for (size_t j = 0; j < nodes.size(); j++) {
- Node* node = nodes[j];
- for (Node* const input : node->inputs()) {
- CHECK_NE(NULL, input);
+ for (Node* node : nodes) {
+ for (int index = 0; index < node->InputCount(); index++) {
+ Node* input = node->InputAt(index);
+ if (input == nullptr) {
+ std::ostringstream str;
+ str << "GraphError: node #" << node->id() << ":" << *node->op()
+ << "(input @" << index << ") == null";
+ FATAL(str.str().c_str());
+ }
+ if (input->opcode() == IrOpcode::kDead) {
+ std::ostringstream str;
+ str << "GraphError: node #" << node->id() << ":" << *node->op()
+ << "(input @" << index << ") == dead";
+ FATAL(str.str().c_str());
+ }
}
- for (Node* const use : node->uses()) {
+ for (Node* use : node->uses()) {
CHECK(marked.IsReachableFromEnd(use));
}
}
@@ -281,6 +328,7 @@ class ControlReducerImpl {
// Recurse on an input if necessary.
for (Node* const input : node->inputs()) {
+ DCHECK(input);
if (Recurse(input)) return;
}
@@ -334,16 +382,21 @@ class ControlReducerImpl {
}
}
- Node* dead() {
- if (dead_ == NULL) dead_ = graph()->NewNode(common_->Dead());
- return dead_;
+ // Mark {node} as visited.
+ void MarkAsVisited(Node* node) {
+ size_t id = static_cast<size_t>(node->id());
+ EnsureStateSize(id);
+ state_[id] = kVisited;
}
+ Node* dead() { return jsgraph_->DeadControl(); }
+
//===========================================================================
// Reducer implementation: perform reductions on a node.
//===========================================================================
Node* ReduceNode(Node* node) {
- if (node->op()->ControlInputCount() == 1) {
+ if (node->op()->ControlInputCount() == 1 ||
+ node->opcode() == IrOpcode::kLoop) {
// If a node has only one control input and it is dead, replace with dead.
Node* control = NodeProperties::GetControlInput(node);
if (control->opcode() == IrOpcode::kDead) {
@@ -356,6 +409,10 @@ class ControlReducerImpl {
switch (node->opcode()) {
case IrOpcode::kBranch:
return ReduceBranch(node);
+ case IrOpcode::kIfTrue:
+ return ReduceIfTrue(node);
+ case IrOpcode::kIfFalse:
+ return ReduceIfFalse(node);
case IrOpcode::kLoop:
case IrOpcode::kMerge:
return ReduceMerge(node);
@@ -418,8 +475,8 @@ class ControlReducerImpl {
}
Node* replacement = NULL;
- Node::Inputs inputs = node->inputs();
- for (InputIter it = inputs.begin(); n > 1; --n, ++it) {
+ auto const inputs = node->inputs();
+ for (auto it = inputs.begin(); n > 1; --n, ++it) {
Node* input = *it;
if (input->opcode() == IrOpcode::kDead) continue; // ignore dead inputs.
if (input != node && input != replacement) { // non-redundant input.
@@ -430,6 +487,14 @@ class ControlReducerImpl {
return replacement == NULL ? dead() : replacement;
}
+ // Reduce branches.
+ Node* ReduceBranch(Node* branch) {
+ if (DecideCondition(branch->InputAt(0)) != kUnknown) {
+ for (Node* use : branch->uses()) Revisit(use);
+ }
+ return branch;
+ }
+
// Reduce merges by trimming away dead inputs from the merge and phis.
Node* ReduceMerge(Node* node) {
// Count the number of live inputs.
@@ -454,10 +519,7 @@ class ControlReducerImpl {
// Gather phis and effect phis to be edited.
ZoneVector<Node*> phis(zone_);
for (Node* const use : node->uses()) {
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
- phis.push_back(use);
- }
+ if (NodeProperties::IsPhi(use)) phis.push_back(use);
}
if (live == 1) {
@@ -480,55 +542,54 @@ class ControlReducerImpl {
}
// Reduce branches if they have constant inputs.
- Node* ReduceBranch(Node* node) {
- Decision result = DecideCondition(node->InputAt(0));
- if (result == kUnknown) return node;
-
- TRACE(("BranchReduce: #%d:%s = %s\n", node->id(), node->op()->mnemonic(),
- (result == kTrue) ? "true" : "false"));
-
- // Replace IfTrue and IfFalse projections from this branch.
- Node* control = NodeProperties::GetControlInput(node);
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (use->opcode() == IrOpcode::kIfTrue) {
- TRACE((" IfTrue: #%d:%s\n", use->id(), use->op()->mnemonic()));
- edge.UpdateTo(NULL);
- ReplaceNode(use, (result == kTrue) ? control : dead());
- } else if (use->opcode() == IrOpcode::kIfFalse) {
- TRACE((" IfFalse: #%d:%s\n", use->id(), use->op()->mnemonic()));
- edge.UpdateTo(NULL);
- ReplaceNode(use, (result == kTrue) ? dead() : control);
- }
+ Node* ReduceIfTrue(Node* node) {
+ Node* branch = node->InputAt(0);
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ Decision result = DecideCondition(branch->InputAt(0));
+ if (result == kTrue) {
+ // fold a true branch by replacing IfTrue with the branch control.
+ TRACE(("BranchReduce: #%d:%s => #%d:%s\n", branch->id(),
+ branch->op()->mnemonic(), node->id(), node->op()->mnemonic()));
+ return branch->InputAt(1);
+ }
+ return result == kUnknown ? node : dead();
+ }
+
+ // Reduce branches if they have constant inputs.
+ Node* ReduceIfFalse(Node* node) {
+ Node* branch = node->InputAt(0);
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ Decision result = DecideCondition(branch->InputAt(0));
+ if (result == kFalse) {
+ // fold a false branch by replacing IfFalse with the branch control.
+ TRACE(("BranchReduce: #%d:%s => #%d:%s\n", branch->id(),
+ branch->op()->mnemonic(), node->id(), node->op()->mnemonic()));
+ return branch->InputAt(1);
}
- return control;
+ return result == kUnknown ? node : dead();
}
// Remove inputs to {node} corresponding to the dead inputs to {merge}
// and compact the remaining inputs, updating the operator.
void RemoveDeadInputs(Node* merge, Node* node) {
- int pos = 0;
- for (int i = 0; i < node->InputCount(); i++) {
+ int live = 0;
+ for (int i = 0; i < merge->InputCount(); i++) {
// skip dead inputs.
- if (i < merge->InputCount() &&
- merge->InputAt(i)->opcode() == IrOpcode::kDead)
- continue;
+ if (merge->InputAt(i)->opcode() == IrOpcode::kDead) continue;
// compact live inputs.
- if (pos != i) node->ReplaceInput(pos, node->InputAt(i));
- pos++;
+ if (live != i) node->ReplaceInput(live, node->InputAt(i));
+ live++;
}
- node->TrimInputCount(pos);
- if (node->opcode() == IrOpcode::kPhi) {
- node->set_op(common_->Phi(OpParameter<MachineType>(node->op()), pos - 1));
- } else if (node->opcode() == IrOpcode::kEffectPhi) {
- node->set_op(common_->EffectPhi(pos - 1));
- } else if (node->opcode() == IrOpcode::kMerge) {
- node->set_op(common_->Merge(pos));
- } else if (node->opcode() == IrOpcode::kLoop) {
- node->set_op(common_->Loop(pos));
- } else {
- UNREACHABLE();
+ // compact remaining inputs.
+ int total = live;
+ for (int i = merge->InputCount(); i < node->InputCount(); i++) {
+ if (total != i) node->ReplaceInput(total, node->InputAt(i));
+ total++;
}
+ DCHECK_EQ(total, live + node->InputCount() - merge->InputCount());
+ DCHECK_NE(total, node->InputCount());
+ node->TrimInputCount(total);
+ node->set_op(common_->ResizeMergeOrPhi(node->op(), live));
}
// Replace uses of {node} with {replacement} and revisit the uses.
@@ -562,30 +623,36 @@ void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) {
}
-Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph,
- CommonOperatorBuilder* common,
- Node* node) {
- Zone zone(jsgraph->graph()->zone()->isolate());
+Node* ControlReducer::ReduceMerge(JSGraph* jsgraph,
+ CommonOperatorBuilder* common, Node* node) {
+ Zone zone;
ControlReducerImpl impl(&zone, jsgraph, common);
- return impl.ReducePhi(node);
+ return impl.ReduceMerge(node);
}
-Node* ControlReducer::ReduceMergeForTesting(JSGraph* jsgraph,
- CommonOperatorBuilder* common,
- Node* node) {
- Zone zone(jsgraph->graph()->zone()->isolate());
+Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph,
+ CommonOperatorBuilder* common,
+ Node* node) {
+ Zone zone;
ControlReducerImpl impl(&zone, jsgraph, common);
- return impl.ReduceMerge(node);
+ return impl.ReducePhi(node);
}
-Node* ControlReducer::ReduceBranchForTesting(JSGraph* jsgraph,
+Node* ControlReducer::ReduceIfNodeForTesting(JSGraph* jsgraph,
CommonOperatorBuilder* common,
Node* node) {
- Zone zone(jsgraph->graph()->zone()->isolate());
+ Zone zone;
ControlReducerImpl impl(&zone, jsgraph, common);
- return impl.ReduceBranch(node);
+ switch (node->opcode()) {
+ case IrOpcode::kIfTrue:
+ return impl.ReduceIfTrue(node);
+ case IrOpcode::kIfFalse:
+ return impl.ReduceIfFalse(node);
+ default:
+ return node;
+ }
}
}
}
diff --git a/deps/v8/src/compiler/control-reducer.h b/deps/v8/src/compiler/control-reducer.h
index e25bb88202..bcbd80e916 100644
--- a/deps/v8/src/compiler/control-reducer.h
+++ b/deps/v8/src/compiler/control-reducer.h
@@ -7,8 +7,14 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class Zone;
+
+
namespace compiler {
+// Forward declarations.
class JSGraph;
class CommonOperatorBuilder;
class Node;
@@ -22,15 +28,16 @@ class ControlReducer {
// Trim nodes in the graph that are not reachable from end.
static void TrimGraph(Zone* zone, JSGraph* graph);
+ // Reduces a single merge node and attached phis.
+ static Node* ReduceMerge(JSGraph* graph, CommonOperatorBuilder* builder,
+ Node* node);
+
// Testing interface.
static Node* ReducePhiForTesting(JSGraph* graph,
CommonOperatorBuilder* builder, Node* node);
- static Node* ReduceBranchForTesting(JSGraph* graph,
+ static Node* ReduceIfNodeForTesting(JSGraph* graph,
CommonOperatorBuilder* builder,
Node* node);
- static Node* ReduceMergeForTesting(JSGraph* graph,
- CommonOperatorBuilder* builder,
- Node* node);
};
}
}
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index 6133cc5c8b..cf83638bba 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -5,10 +5,8 @@
#ifndef V8_COMPILER_DIAMOND_H_
#define V8_COMPILER_DIAMOND_H_
-#include "src/v8.h"
-
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
#include "src/compiler/node.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index f99d7bd1e1..a5dd11657b 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_FRAME_H_
#define V8_COMPILER_FRAME_H_
-#include "src/v8.h"
-
#include "src/bit-vector.h"
namespace v8 {
@@ -23,6 +21,7 @@ class Frame : public ZoneObject {
: register_save_area_size_(0),
spill_slot_count_(0),
double_spill_slot_count_(0),
+ osr_stack_slot_count_(0),
allocated_registers_(NULL),
allocated_double_registers_(NULL) {}
@@ -50,6 +49,14 @@ class Frame : public ZoneObject {
int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+ // OSR stack slots, including locals and expression stack slots.
+ void SetOsrStackSlotCount(int slots) {
+ DCHECK(slots >= 0);
+ osr_stack_slot_count_ = slots;
+ }
+
+ int GetOsrStackSlotCount() { return osr_stack_slot_count_; }
+
int AllocateSpillSlot(bool is_double) {
// If 32-bit, skip one if the new slot is a double.
if (is_double) {
@@ -63,10 +70,16 @@ class Frame : public ZoneObject {
return spill_slot_count_++;
}
+ void ReserveSpillSlots(size_t slot_count) {
+ DCHECK_EQ(0, spill_slot_count_); // can only reserve before allocation.
+ spill_slot_count_ = static_cast<int>(slot_count);
+ }
+
private:
int register_save_area_size_;
int spill_slot_count_;
int double_spill_slot_count_;
+ int osr_stack_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
diff --git a/deps/v8/src/compiler/graph-builder.cc b/deps/v8/src/compiler/graph-builder.cc
deleted file mode 100644
index 6321aaa4e5..0000000000
--- a/deps/v8/src/compiler/graph-builder.cc
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph-builder.h"
-
-#include "src/bit-vector.h"
-#include "src/compiler.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/operator-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-StructuredGraphBuilder::StructuredGraphBuilder(Zone* local_zone, Graph* graph,
- CommonOperatorBuilder* common)
- : GraphBuilder(graph),
- common_(common),
- environment_(NULL),
- local_zone_(local_zone),
- input_buffer_size_(0),
- input_buffer_(NULL),
- current_context_(NULL),
- exit_control_(NULL) {
- EnsureInputBufferSize(kInputBufferSizeIncrement);
-}
-
-
-Node** StructuredGraphBuilder::EnsureInputBufferSize(int size) {
- if (size > input_buffer_size_) {
- size += kInputBufferSizeIncrement;
- input_buffer_ = local_zone()->NewArray<Node*>(size);
- }
- return input_buffer_;
-}
-
-
-Node* StructuredGraphBuilder::MakeNode(const Operator* op,
- int value_input_count,
- Node** value_inputs, bool incomplete) {
- DCHECK(op->ValueInputCount() == value_input_count);
-
- bool has_context = OperatorProperties::HasContextInput(op);
- bool has_framestate = OperatorProperties::HasFrameStateInput(op);
- bool has_control = op->ControlInputCount() == 1;
- bool has_effect = op->EffectInputCount() == 1;
-
- DCHECK(op->ControlInputCount() < 2);
- DCHECK(op->EffectInputCount() < 2);
-
- Node* result = NULL;
- if (!has_context && !has_framestate && !has_control && !has_effect) {
- result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
- } else {
- int input_count_with_deps = value_input_count;
- if (has_context) ++input_count_with_deps;
- if (has_framestate) ++input_count_with_deps;
- if (has_control) ++input_count_with_deps;
- if (has_effect) ++input_count_with_deps;
- Node** buffer = EnsureInputBufferSize(input_count_with_deps);
- memcpy(buffer, value_inputs, kPointerSize * value_input_count);
- Node** current_input = buffer + value_input_count;
- if (has_context) {
- *current_input++ = current_context();
- }
- if (has_framestate) {
- // The frame state will be inserted later. Here we misuse
- // the dead_control node as a sentinel to be later overwritten
- // with the real frame state.
- *current_input++ = dead_control();
- }
- if (has_effect) {
- *current_input++ = environment_->GetEffectDependency();
- }
- if (has_control) {
- *current_input++ = environment_->GetControlDependency();
- }
- result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
- if (has_effect) {
- environment_->UpdateEffectDependency(result);
- }
- if (result->op()->ControlOutputCount() > 0 &&
- !environment()->IsMarkedAsUnreachable()) {
- environment_->UpdateControlDependency(result);
- }
- }
-
- return result;
-}
-
-
-void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
- Node* exit) {
- if (environment()->IsMarkedAsUnreachable()) return;
- if (exit_control() != NULL) {
- exit = MergeControl(exit_control(), exit);
- }
- environment()->MarkAsUnreachable();
- set_exit_control(exit);
-}
-
-
-StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
- Environment* env) {
- return new (local_zone()) Environment(*env);
-}
-
-
-StructuredGraphBuilder::Environment::Environment(
- StructuredGraphBuilder* builder, Node* control_dependency)
- : builder_(builder),
- control_dependency_(control_dependency),
- effect_dependency_(control_dependency),
- values_(zone()) {}
-
-
-StructuredGraphBuilder::Environment::Environment(const Environment& copy)
- : builder_(copy.builder()),
- control_dependency_(copy.control_dependency_),
- effect_dependency_(copy.effect_dependency_),
- values_(copy.zone()) {
- const size_t kStackEstimate = 7; // optimum from experimentation!
- values_.reserve(copy.values_.size() + kStackEstimate);
- values_.insert(values_.begin(), copy.values_.begin(), copy.values_.end());
-}
-
-
-void StructuredGraphBuilder::Environment::Merge(Environment* other) {
- DCHECK(values_.size() == other->values_.size());
-
- // Nothing to do if the other environment is dead.
- if (other->IsMarkedAsUnreachable()) return;
-
- // Resurrect a dead environment by copying the contents of the other one and
- // placing a singleton merge as the new control dependency.
- if (this->IsMarkedAsUnreachable()) {
- Node* other_control = other->control_dependency_;
- Node* inputs[] = {other_control};
- control_dependency_ =
- graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
- effect_dependency_ = other->effect_dependency_;
- values_ = other->values_;
- return;
- }
-
- // Create a merge of the control dependencies of both environments and update
- // the current environment's control dependency accordingly.
- Node* control = builder_->MergeControl(this->GetControlDependency(),
- other->GetControlDependency());
- UpdateControlDependency(control);
-
- // Create a merge of the effect dependencies of both environments and update
- // the current environment's effect dependency accordingly.
- Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
- other->GetEffectDependency(), control);
- UpdateEffectDependency(effect);
-
- // Introduce Phi nodes for values that have differing input at merge points,
- // potentially extending an existing Phi node if possible.
- for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
- values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
- }
-}
-
-
-void StructuredGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
- Node* control = GetControlDependency();
- int size = static_cast<int>(values()->size());
- if (assigned == NULL) {
- // Assume that everything is updated in the loop.
- for (int i = 0; i < size; ++i) {
- Node* phi = builder_->NewPhi(1, values()->at(i), control);
- values()->at(i) = phi;
- }
- } else {
- // Only build phis for those locals assigned in this loop.
- for (int i = 0; i < size; ++i) {
- if (i < assigned->length() && !assigned->Contains(i)) continue;
- Node* phi = builder_->NewPhi(1, values()->at(i), control);
- values()->at(i) = phi;
- }
- }
- Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
- UpdateEffectDependency(effect);
-}
-
-
-Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
- const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
- Node** buffer = EnsureInputBufferSize(count + 1);
- MemsetPointer(buffer, input, count);
- buffer[count] = control;
- return graph()->NewNode(phi_op, count + 1, buffer, true);
-}
-
-
-// TODO(mstarzinger): Revisit this once we have proper effect states.
-Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
- Node* control) {
- const Operator* phi_op = common()->EffectPhi(count);
- Node** buffer = EnsureInputBufferSize(count + 1);
- MemsetPointer(buffer, input, count);
- buffer[count] = control;
- return graph()->NewNode(phi_op, count + 1, buffer, true);
-}
-
-
-Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
- int inputs = control->op()->ControlInputCount() + 1;
- if (control->opcode() == IrOpcode::kLoop) {
- // Control node for loop exists, add input.
- const Operator* op = common()->Loop(inputs);
- control->AppendInput(graph_zone(), other);
- control->set_op(op);
- } else if (control->opcode() == IrOpcode::kMerge) {
- // Control node for merge exists, add input.
- const Operator* op = common()->Merge(inputs);
- control->AppendInput(graph_zone(), other);
- control->set_op(op);
- } else {
- // Control node is a singleton, introduce a merge.
- const Operator* op = common()->Merge(inputs);
- Node* inputs[] = {control, other};
- control = graph()->NewNode(op, arraysize(inputs), inputs, true);
- }
- return control;
-}
-
-
-Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
- Node* control) {
- int inputs = control->op()->ControlInputCount();
- if (value->opcode() == IrOpcode::kEffectPhi &&
- NodeProperties::GetControlInput(value) == control) {
- // Phi already exists, add input.
- value->set_op(common()->EffectPhi(inputs));
- value->InsertInput(graph_zone(), inputs - 1, other);
- } else if (value != other) {
- // Phi does not exist yet, introduce one.
- value = NewEffectPhi(inputs, value, control);
- value->ReplaceInput(inputs - 1, other);
- }
- return value;
-}
-
-
-Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
- Node* control) {
- int inputs = control->op()->ControlInputCount();
- if (value->opcode() == IrOpcode::kPhi &&
- NodeProperties::GetControlInput(value) == control) {
- // Phi already exists, add input.
- value->set_op(common()->Phi(kMachAnyTagged, inputs));
- value->InsertInput(graph_zone(), inputs - 1, other);
- } else if (value != other) {
- // Phi does not exist yet, introduce one.
- value = NewPhi(inputs, value, control);
- value->ReplaceInput(inputs - 1, other);
- }
- return value;
-}
-
-
-Node* StructuredGraphBuilder::dead_control() {
- if (!dead_control_.is_set()) {
- Node* dead_node = graph()->NewNode(common_->Dead());
- dead_control_.set(dead_node);
- return dead_node;
- }
- return dead_control_.get();
-}
-}
-}
-} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/graph-builder.h b/deps/v8/src/compiler/graph-builder.h
index d88b125987..f2fb7f6c09 100644
--- a/deps/v8/src/compiler/graph-builder.h
+++ b/deps/v8/src/compiler/graph-builder.h
@@ -5,26 +5,21 @@
#ifndef V8_COMPILER_GRAPH_BUILDER_H_
#define V8_COMPILER_GRAPH_BUILDER_H_
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
#include "src/unique.h"
namespace v8 {
namespace internal {
-
-class BitVector;
-
namespace compiler {
-class Node;
-
// A common base class for anything that creates nodes in a graph.
class GraphBuilder {
public:
- explicit GraphBuilder(Graph* graph) : graph_(graph) {}
+ GraphBuilder(Isolate* isolate, Graph* graph)
+ : isolate_(isolate), graph_(graph) {}
virtual ~GraphBuilder() {}
Node* NewNode(const Operator* op, bool incomplete = false) {
@@ -67,6 +62,7 @@ class GraphBuilder {
return MakeNode(op, value_input_count, value_inputs, incomplete);
}
+ Isolate* isolate() const { return isolate_; }
Graph* graph() const { return graph_; }
protected:
@@ -75,174 +71,12 @@ class GraphBuilder {
Node** value_inputs, bool incomplete) = 0;
private:
+ Isolate* isolate_;
Graph* graph_;
};
-
-// The StructuredGraphBuilder produces a high-level IR graph. It is used as the
-// base class for concrete implementations (e.g the AstGraphBuilder or the
-// StubGraphBuilder).
-class StructuredGraphBuilder : public GraphBuilder {
- public:
- StructuredGraphBuilder(Zone* zone, Graph* graph,
- CommonOperatorBuilder* common);
- ~StructuredGraphBuilder() OVERRIDE {}
-
- // Creates a new Phi node having {count} input values.
- Node* NewPhi(int count, Node* input, Node* control);
- Node* NewEffectPhi(int count, Node* input, Node* control);
-
- // Helpers for merging control, effect or value dependencies.
- Node* MergeControl(Node* control, Node* other);
- Node* MergeEffect(Node* value, Node* other, Node* control);
- Node* MergeValue(Node* value, Node* other, Node* control);
-
- // Helpers to create new control nodes.
- Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
- Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
- Node* NewMerge() { return NewNode(common()->Merge(1), true); }
- Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
- return NewNode(common()->Branch(hint), condition);
- }
-
- protected:
- class Environment;
- friend class Environment;
- friend class ControlBuilder;
-
- // The following method creates a new node having the specified operator and
- // ensures effect and control dependencies are wired up. The dependencies
- // tracked by the environment might be mutated.
- Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
- bool incomplete) FINAL;
-
- Environment* environment() const { return environment_; }
- void set_environment(Environment* env) { environment_ = env; }
-
- Node* current_context() const { return current_context_; }
- void set_current_context(Node* context) { current_context_ = context; }
-
- Node* exit_control() const { return exit_control_; }
- void set_exit_control(Node* node) { exit_control_ = node; }
-
- Node* dead_control();
-
- Zone* graph_zone() const { return graph()->zone(); }
- Zone* local_zone() const { return local_zone_; }
- Isolate* isolate() const { return graph_zone()->isolate(); }
- CommonOperatorBuilder* common() const { return common_; }
-
- // Helper to wrap a Handle<T> into a Unique<T>.
- template <class T>
- Unique<T> MakeUnique(Handle<T> object) {
- return Unique<T>::CreateUninitialized(object);
- }
-
- // Support for control flow builders. The concrete type of the environment
- // depends on the graph builder, but environments themselves are not virtual.
- virtual Environment* CopyEnvironment(Environment* env);
-
- // Helper to indicate a node exits the function body.
- void UpdateControlDependencyToLeaveFunction(Node* exit);
-
- private:
- CommonOperatorBuilder* common_;
- Environment* environment_;
-
- // Zone local to the builder for data not leaking into the graph.
- Zone* local_zone_;
-
- // Temporary storage for building node input lists.
- int input_buffer_size_;
- Node** input_buffer_;
-
- // Node representing the control dependency for dead code.
- SetOncePointer<Node> dead_control_;
-
- // Node representing the current context within the function body.
- Node* current_context_;
-
- // Merge of all control nodes that exit the function body.
- Node* exit_control_;
-
- // Growth increment for the temporary buffer used to construct input lists to
- // new nodes.
- static const int kInputBufferSizeIncrement = 64;
-
- Node** EnsureInputBufferSize(int size);
-
- DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
-};
-
-
-// The abstract execution environment contains static knowledge about
-// execution state at arbitrary control-flow points. It allows for
-// simulation of the control-flow at compile time.
-class StructuredGraphBuilder::Environment : public ZoneObject {
- public:
- Environment(StructuredGraphBuilder* builder, Node* control_dependency);
- Environment(const Environment& copy);
-
- // Control dependency tracked by this environment.
- Node* GetControlDependency() { return control_dependency_; }
- void UpdateControlDependency(Node* dependency) {
- control_dependency_ = dependency;
- }
-
- // Effect dependency tracked by this environment.
- Node* GetEffectDependency() { return effect_dependency_; }
- void UpdateEffectDependency(Node* dependency) {
- effect_dependency_ = dependency;
- }
-
- // Mark this environment as being unreachable.
- void MarkAsUnreachable() {
- UpdateControlDependency(builder()->dead_control());
- }
- bool IsMarkedAsUnreachable() {
- return GetControlDependency()->opcode() == IrOpcode::kDead;
- }
-
- // Merge another environment into this one.
- void Merge(Environment* other);
-
- // Copies this environment at a control-flow split point.
- Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }
-
- // Copies this environment to a potentially unreachable control-flow point.
- Environment* CopyAsUnreachable() {
- Environment* env = builder()->CopyEnvironment(this);
- env->MarkAsUnreachable();
- return env;
- }
-
- // Copies this environment at a loop header control-flow point.
- Environment* CopyForLoop(BitVector* assigned) {
- PrepareForLoop(assigned);
- return builder()->CopyEnvironment(this);
- }
-
- Node* GetContext() { return builder_->current_context(); }
-
- protected:
- Zone* zone() const { return builder_->local_zone(); }
- Graph* graph() const { return builder_->graph(); }
- StructuredGraphBuilder* builder() const { return builder_; }
- CommonOperatorBuilder* common() { return builder_->common(); }
- NodeVector* values() { return &values_; }
-
- // Prepare environment to be used as loop header.
- void PrepareForLoop(BitVector* assigned);
-
- private:
- StructuredGraphBuilder* builder_;
- Node* control_dependency_;
- Node* effect_dependency_;
- NodeVector values_;
-};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_GRAPH_BUILDER_H__
diff --git a/deps/v8/src/compiler/graph-inl.h b/deps/v8/src/compiler/graph-inl.h
index c135ae5d77..3a21737c6e 100644
--- a/deps/v8/src/compiler/graph-inl.h
+++ b/deps/v8/src/compiler/graph-inl.h
@@ -14,7 +14,7 @@ namespace compiler {
template <class Visitor>
void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
- Zone tmp_zone(zone()->isolate());
+ Zone tmp_zone;
GenericGraphVisit::Visit<Visitor>(this, &tmp_zone, end(), visitor);
}
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 9a6b121ffb..7f3a66e0de 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/graph-reducer.h"
-
#include <functional>
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/node.h"
namespace v8 {
namespace internal {
@@ -154,9 +154,11 @@ void GraphReducer::ReduceTop() {
// Otherwise {node} was replaced by a new node. Replace all old uses of
// {node} with {replacement}. New nodes created by this reduction can
// use {node}.
- node->ReplaceUsesIf(
- [node_count](Node* const node) { return node->id() < node_count; },
- replacement);
+ for (Edge edge : node->use_edges()) {
+ if (edge.from()->id() < node_count) {
+ edge.UpdateTo(replacement);
+ }
+ }
// Unlink {node} if it's no longer used.
if (node->uses().empty()) {
node->Kill();
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 09a650cce8..5c612baf6e 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -5,13 +5,18 @@
#ifndef V8_COMPILER_GRAPH_REDUCER_H_
#define V8_COMPILER_GRAPH_REDUCER_H_
-#include "src/compiler/graph.h"
+#include "src/compiler/node-marker.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
+class Graph;
+class Node;
+
+
// Represents the result of trying to reduce a node in the graph.
class Reduction FINAL {
public:
diff --git a/deps/v8/src/compiler/graph-replay.cc b/deps/v8/src/compiler/graph-replay.cc
index 3a0b7836fc..06771ff824 100644
--- a/deps/v8/src/compiler/graph-replay.cc
+++ b/deps/v8/src/compiler/graph-replay.cc
@@ -4,9 +4,9 @@
#include "src/compiler/graph-replay.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/operator-properties.h"
@@ -20,22 +20,26 @@ namespace compiler {
void GraphReplayPrinter::PrintReplay(Graph* graph) {
GraphReplayPrinter replay;
PrintF(" Node* nil = graph.NewNode(common_builder.Dead());\n");
- graph->VisitNodeInputsFromEnd(&replay);
-}
-
+ Zone zone;
+ AllNodes nodes(&zone, graph);
-void GraphReplayPrinter::Pre(Node* node) {
- PrintReplayOpCreator(node->op());
- PrintF(" Node* n%d = graph.NewNode(op", node->id());
- for (int i = 0; i < node->InputCount(); ++i) {
- PrintF(", nil");
+ // Allocate the nodes first.
+ for (Node* node : nodes.live) {
+ PrintReplayOpCreator(node->op());
+ PrintF(" Node* n%d = graph.NewNode(op", node->id());
+ for (int i = 0; i < node->InputCount(); ++i) {
+ PrintF(", nil");
+ }
+ PrintF("); USE(n%d);\n", node->id());
}
- PrintF("); USE(n%d);\n", node->id());
-}
-
-void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
- PrintF(" n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
+ // Connect the nodes to their inputs.
+ for (Node* node : nodes.live) {
+ for (int i = 0; i < node->InputCount(); i++) {
+ PrintF(" n%d->ReplaceInput(%d, n%d);\n", node->id(), i,
+ node->InputAt(i)->id());
+ }
+ }
}
diff --git a/deps/v8/src/compiler/graph-replay.h b/deps/v8/src/compiler/graph-replay.h
index f41311e3bf..be89ebd045 100644
--- a/deps/v8/src/compiler/graph-replay.h
+++ b/deps/v8/src/compiler/graph-replay.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_GRAPH_REPLAY_H_
#define V8_COMPILER_GRAPH_REPLAY_H_
-#include "src/compiler/generic-algorithm.h"
#include "src/compiler/node.h"
namespace v8 {
@@ -18,7 +17,7 @@ class Graph;
// Helper class to print a full replay of a graph. This replay can be used to
// materialize the same graph within a C++ unit test and hence test subsequent
// optimization passes on a graph without going through the construction steps.
-class GraphReplayPrinter FINAL : public NullNodeVisitor {
+class GraphReplayPrinter {
public:
#ifdef DEBUG
static void PrintReplay(Graph* graph);
@@ -26,9 +25,6 @@ class GraphReplayPrinter FINAL : public NullNodeVisitor {
static void PrintReplay(Graph* graph) {}
#endif
- void Pre(Node* node);
- void PostEdge(Node* from, int index, Node* to);
-
private:
GraphReplayPrinter() {}
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index e018c7ac19..42d355fb1d 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -8,13 +8,13 @@
#include <string>
#include "src/code-stubs.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
@@ -24,63 +24,40 @@ namespace v8 {
namespace internal {
namespace compiler {
-static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
-static const char* SafeMnemonic(Node* node) {
- return node == NULL ? "null" : node->op()->mnemonic();
-}
-#define DEAD_COLOR "#999999"
-
-class AllNodes {
- public:
- enum State { kDead, kGray, kLive };
-
- AllNodes(Zone* local_zone, const Graph* graph)
- : state(graph->NodeCount(), kDead, local_zone),
- live(local_zone),
- gray(local_zone) {
- Node* end = graph->end();
- state[end->id()] = kLive;
- live.push_back(end);
- // Find all live nodes reachable from end.
- for (size_t i = 0; i < live.size(); i++) {
- for (Node* const input : live[i]->inputs()) {
- if (input == NULL) {
- // TODO(titzer): print a warning.
- continue;
- }
- if (input->id() >= graph->NodeCount()) {
- // TODO(titzer): print a warning.
- continue;
- }
- if (state[input->id()] != kLive) {
- live.push_back(input);
- state[input->id()] = kLive;
- }
- }
- }
-
- // Find all nodes that are not reachable from end that use live nodes.
- for (size_t i = 0; i < live.size(); i++) {
- for (Node* const use : live[i]->uses()) {
- if (state[use->id()] == kDead) {
- gray.push_back(use);
- state[use->id()] = kGray;
- }
- }
+FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
+ const char* suffix, const char* mode) {
+ EmbeddedVector<char, 256> filename;
+ SmartArrayPointer<char> function_name;
+ if (!info->shared_info().is_null()) {
+ function_name = info->shared_info()->DebugName()->ToCString();
+ if (strlen(function_name.get()) > 0) {
+ SNPrintF(filename, "turbo-%s", function_name.get());
+ } else {
+ SNPrintF(filename, "turbo-%p", static_cast<void*>(info));
}
+ } else {
+ SNPrintF(filename, "turbo-none-%s", phase);
}
+ std::replace(filename.start(), filename.start() + filename.length(), ' ',
+ '_');
- bool IsLive(Node* node) {
- return node != NULL && node->id() < static_cast<int>(state.size()) &&
- state[node->id()] == kLive;
+ EmbeddedVector<char, 256> full_filename;
+ if (phase == NULL) {
+ SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
+ } else {
+ SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
}
+ return base::OS::FOpen(full_filename.start(), mode);
+}
- ZoneVector<State> state;
- NodeVector live;
- NodeVector gray;
-};
+static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
+static const char* SafeMnemonic(Node* node) {
+ return node == NULL ? "null" : node->op()->mnemonic();
+}
+
+#define DEAD_COLOR "#999999"
class Escaped {
public:
@@ -111,25 +88,27 @@ class Escaped {
class JSONGraphNodeWriter {
public:
- JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph)
- : os_(os), all_(zone, graph), first_node_(true) {}
+ JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph,
+ const SourcePositionTable* positions)
+ : os_(os), all_(zone, graph), positions_(positions), first_node_(true) {}
void Print() {
for (Node* const node : all_.live) PrintNode(node);
+ os_ << "\n";
}
void PrintNode(Node* node) {
if (first_node_) {
first_node_ = false;
} else {
- os_ << ",";
+ os_ << ",\n";
}
std::ostringstream label;
label << *node->op();
os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << Escaped(label, "\"")
<< "\"";
IrOpcode::Value opcode = node->opcode();
- if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ if (IrOpcode::IsPhiOpcode(opcode)) {
os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
<< "]";
os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
@@ -142,6 +121,11 @@ class JSONGraphNodeWriter {
if (opcode == IrOpcode::kBranch) {
os_ << ",\"rankInputs\":[0]";
}
+ SourcePosition position = positions_->GetSourcePosition(node);
+ if (!position.IsUnknown()) {
+ DCHECK(!position.IsInvalid());
+ os_ << ",\"pos\":" << position.raw();
+ }
os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
: "false");
@@ -151,6 +135,7 @@ class JSONGraphNodeWriter {
private:
std::ostream& os_;
AllNodes all_;
+ const SourcePositionTable* positions_;
bool first_node_;
DISALLOW_COPY_AND_ASSIGN(JSONGraphNodeWriter);
@@ -164,6 +149,7 @@ class JSONGraphEdgeWriter {
void Print() {
for (Node* const node : all_.live) PrintEdges(node);
+ os_ << "\n";
}
void PrintEdges(Node* node) {
@@ -178,7 +164,7 @@ class JSONGraphEdgeWriter {
if (first_edge_) {
first_edge_ = false;
} else {
- os_ << ",";
+ os_ << ",\n";
}
const char* edge_type = NULL;
if (index < NodeProperties::FirstValueIndex(from)) {
@@ -208,10 +194,10 @@ class JSONGraphEdgeWriter {
std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
- Zone tmp_zone(ad.graph.zone()->isolate());
- os << "{\"nodes\":[";
- JSONGraphNodeWriter(os, &tmp_zone, &ad.graph).Print();
- os << "],\"edges\":[";
+ Zone tmp_zone;
+ os << "{\n\"nodes\":[";
+ JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
+ os << "],\n\"edges\":[";
JSONGraphEdgeWriter(os, &tmp_zone, &ad.graph).Print();
os << "]}";
return os;
@@ -325,8 +311,7 @@ void GraphVisualizer::PrintNode(Node* node, bool gray) {
static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
- if (from->opcode() == IrOpcode::kPhi ||
- from->opcode() == IrOpcode::kEffectPhi) {
+ if (NodeProperties::IsPhi(from)) {
Node* control = NodeProperties::GetControlInput(from, 0);
return control != NULL && control->opcode() != IrOpcode::kMerge &&
control != to && index != 0;
@@ -391,7 +376,7 @@ void GraphVisualizer::Print() {
std::ostream& operator<<(std::ostream& os, const AsDOT& ad) {
- Zone tmp_zone(ad.graph.zone()->isolate());
+ Zone tmp_zone;
GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
return os;
}
@@ -417,7 +402,8 @@ class GraphC1Visualizer {
void PrintNodeId(Node* n);
void PrintNode(Node* n);
void PrintInputs(Node* n);
- void PrintInputs(InputIter* i, int count, const char* prefix);
+ template <typename InputIterator>
+ void PrintInputs(InputIterator* i, int count, const char* prefix);
void PrintType(Node* node);
void PrintLiveRange(LiveRange* range, const char* type);
@@ -516,7 +502,8 @@ void GraphC1Visualizer::PrintNode(Node* n) {
}
-void GraphC1Visualizer::PrintInputs(InputIter* i, int count,
+template <typename InputIterator>
+void GraphC1Visualizer::PrintInputs(InputIterator* i, int count,
const char* prefix) {
if (count > 0) {
os_ << prefix;
@@ -569,17 +556,15 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
PrintIndent();
os_ << "predecessors";
- for (BasicBlock::Predecessors::iterator j = current->predecessors_begin();
- j != current->predecessors_end(); ++j) {
- os_ << " \"B" << (*j)->id() << "\"";
+ for (BasicBlock* predecessor : current->predecessors()) {
+ os_ << " \"B" << predecessor->id() << "\"";
}
os_ << "\n";
PrintIndent();
os_ << "successors";
- for (BasicBlock::Successors::iterator j = current->successors_begin();
- j != current->successors_end(); ++j) {
- os_ << " \"B" << (*j)->id() << "\"";
+ for (BasicBlock* successor : current->successors()) {
+ os_ << " \"B" << successor->id() << "\"";
}
os_ << "\n";
@@ -664,9 +649,8 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
os_ << -1 - current->id().ToInt() << " Goto";
}
os_ << " ->";
- for (BasicBlock::Successors::iterator j = current->successors_begin();
- j != current->successors_end(); ++j) {
- os_ << " B" << (*j)->id();
+ for (BasicBlock* successor : current->successors()) {
+ os_ << " B" << successor->id();
}
if (FLAG_trace_turbo_types && current->control_input() != NULL) {
os_ << " ";
@@ -714,13 +698,13 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
PrintIndent();
os_ << range->id() << " " << type;
if (range->HasRegisterAssigned()) {
- InstructionOperand* op = range->CreateAssignedOperand(zone());
- int assigned_reg = op->index();
- if (op->IsDoubleRegister()) {
+ InstructionOperand op = range->GetAssignedOperand();
+ int assigned_reg = op.index();
+ if (op.IsDoubleRegister()) {
os_ << " \"" << DoubleRegister::AllocationIndexToString(assigned_reg)
<< "\"";
} else {
- DCHECK(op->IsRegister());
+ DCHECK(op.IsRegister());
os_ << " \"" << Register::AllocationIndexToString(assigned_reg) << "\"";
}
} else if (range->IsSpilled()) {
@@ -771,14 +755,14 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
- Zone tmp_zone(ac.info_->isolate());
+ Zone tmp_zone;
GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
return os;
}
std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
- Zone tmp_zone(ac.schedule_->zone()->isolate());
+ Zone tmp_zone;
GraphC1Visualizer(os, &tmp_zone)
.PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
return os;
@@ -786,7 +770,7 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac) {
- Zone tmp_zone(ac.allocator_->code()->zone()->isolate());
+ Zone tmp_zone;
GraphC1Visualizer(os, &tmp_zone).PrintAllocator(ac.phase_, ac.allocator_);
return os;
}
@@ -796,7 +780,7 @@ const int kOnStack = 1;
const int kVisited = 2;
std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
- Zone local_zone(ar.graph.zone()->isolate());
+ Zone local_zone;
ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
ZoneStack<Node*> stack(&local_zone);
@@ -816,7 +800,7 @@ std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
if (pop) {
state[n->id()] = kVisited;
stack.pop();
- os << "#" << SafeId(n) << ":" << SafeMnemonic(n) << "(";
+ os << "#" << n->id() << ":" << *n->op() << "(";
int j = 0;
for (Node* const i : n->inputs()) {
if (j++ > 0) os << ", ";
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 3dd66eaf41..17094c23c5 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -20,6 +20,8 @@ class RegisterAllocator;
class Schedule;
class SourcePositionTable;
+FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
+ const char* suffix, const char* mode);
struct AsDOT {
explicit AsDOT(const Graph& g) : graph(g) {}
@@ -30,8 +32,9 @@ std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
struct AsJSON {
- explicit AsJSON(const Graph& g) : graph(g) {}
+ AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
const Graph& graph;
+ const SourcePositionTable* positions;
};
std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index 995046b74e..193861187b 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -4,14 +4,10 @@
#include "src/compiler/graph.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
+#include <algorithm>
+
+#include "src/base/bits.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-aux-data-inl.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator-properties.h"
namespace v8 {
namespace internal {
@@ -19,30 +15,46 @@ namespace compiler {
Graph::Graph(Zone* zone)
: zone_(zone),
- start_(NULL),
- end_(NULL),
+ start_(nullptr),
+ end_(nullptr),
mark_max_(0),
next_node_id_(0),
decorators_(zone) {}
-void Graph::Decorate(Node* node) {
- for (ZoneVector<GraphDecorator*>::iterator i = decorators_.begin();
- i != decorators_.end(); ++i) {
- (*i)->Decorate(node);
+void Graph::Decorate(Node* node, bool incomplete) {
+ for (auto const decorator : decorators_) {
+ decorator->Decorate(node, incomplete);
}
}
+void Graph::AddDecorator(GraphDecorator* decorator) {
+ decorators_.push_back(decorator);
+}
+
+
+void Graph::RemoveDecorator(GraphDecorator* decorator) {
+ auto const it = std::find(decorators_.begin(), decorators_.end(), decorator);
+ DCHECK(it != decorators_.end());
+ decorators_.erase(it);
+}
+
+
Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
bool incomplete) {
DCHECK_LE(op->ValueInputCount(), input_count);
- Node* result = Node::New(this, input_count, inputs, incomplete);
- result->Initialize(op);
- if (!incomplete) {
- Decorate(result);
- }
- return result;
+ Node* const node =
+ Node::New(zone(), NextNodeId(), op, input_count, inputs, incomplete);
+ Decorate(node, incomplete);
+ return node;
+}
+
+
+NodeId Graph::NextNodeId() {
+ NodeId const id = next_node_id_;
+ CHECK(!base::bits::SignedAddOverflow32(id, 1, &next_node_id_));
+ return id;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index d619da252f..882e549f8d 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -5,12 +5,8 @@
#ifndef V8_COMPILER_GRAPH_H_
#define V8_COMPILER_GRAPH_H_
-#include <map>
-#include <set>
-
-#include "src/compiler/node.h"
-#include "src/compiler/node-aux-data.h"
-#include "src/compiler/source-position.h"
+#include "src/zone.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -18,6 +14,19 @@ namespace compiler {
// Forward declarations.
class GraphDecorator;
+class Node;
+class Operator;
+
+
+// Marks are used during traversal of the graph to distinguish states of nodes.
+// Each node has a mark which is a monotonically increasing integer, and a
+// {NodeMarker} has a range of values that indicate states of a node.
+typedef uint32_t Mark;
+
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef int32_t NodeId;
class Graph : public ZoneObject {
@@ -71,27 +80,18 @@ class Graph : public ZoneObject {
void SetStart(Node* start) { start_ = start; }
void SetEnd(Node* end) { end_ = end; }
- NodeId NextNodeID() { return next_node_id_++; }
- NodeId NodeCount() const { return next_node_id_; }
+ int NodeCount() const { return next_node_id_; }
- void Decorate(Node* node);
-
- void AddDecorator(GraphDecorator* decorator) {
- decorators_.push_back(decorator);
- }
-
- void RemoveDecorator(GraphDecorator* decorator) {
- ZoneVector<GraphDecorator*>::iterator it =
- std::find(decorators_.begin(), decorators_.end(), decorator);
- DCHECK(it != decorators_.end());
- decorators_.erase(it, it + 1);
- }
+ void Decorate(Node* node, bool incomplete);
+ void AddDecorator(GraphDecorator* decorator);
+ void RemoveDecorator(GraphDecorator* decorator);
private:
- template <typename State>
- friend class NodeMarker;
+ friend class NodeMarkerBase;
- Zone* zone_;
+ inline NodeId NextNodeId();
+
+ Zone* const zone_;
Node* start_;
Node* end_;
Mark mark_max_;
@@ -102,46 +102,12 @@ class Graph : public ZoneObject {
};
-// A NodeMarker uses monotonically increasing marks to assign local "states"
-// to nodes. Only one NodeMarker per graph is valid at a given time.
-template <typename State>
-class NodeMarker BASE_EMBEDDED {
- public:
- NodeMarker(Graph* graph, uint32_t num_states)
- : mark_min_(graph->mark_max_), mark_max_(graph->mark_max_ += num_states) {
- DCHECK(num_states > 0); // user error!
- DCHECK(mark_max_ > mark_min_); // check for wraparound.
- }
-
- State Get(Node* node) {
- Mark mark = node->mark();
- if (mark < mark_min_) {
- mark = mark_min_;
- node->set_mark(mark_min_);
- }
- DCHECK_LT(mark, mark_max_);
- return static_cast<State>(mark - mark_min_);
- }
-
- void Set(Node* node, State state) {
- Mark local = static_cast<Mark>(state);
- DCHECK(local < (mark_max_ - mark_min_));
- DCHECK_LT(node->mark(), mark_max_);
- node->set_mark(local + mark_min_);
- }
-
- private:
- Mark mark_min_;
- Mark mark_max_;
-};
-
-
// A graph decorator can be used to add behavior to the creation of nodes
// in a graph.
class GraphDecorator : public ZoneObject {
public:
virtual ~GraphDecorator() {}
- virtual void Decorate(Node* node) = 0;
+ virtual void Decorate(Node* node, bool incomplete) = 0;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 55f7426a4c..d20848918d 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -7,7 +7,6 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
#include "src/scopes.h"
@@ -311,6 +310,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
@@ -730,27 +735,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
case kSignedGreaterThan:
__ j(greater, tlabel);
break;
- case kUnorderedLessThan:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThan:
__ j(below, tlabel);
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
__ j(above_equal, tlabel);
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThanOrEqual:
__ j(below_equal, tlabel);
break;
- case kUnorderedGreaterThan:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
@@ -780,7 +773,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
@@ -812,35 +805,15 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kSignedGreaterThan:
cc = greater;
break;
- case kUnorderedLessThan:
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThan:
cc = below;
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
cc = above_equal;
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThanOrEqual:
cc = below_equal;
break;
- case kUnorderedGreaterThan:
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThan:
cc = above;
break;
@@ -869,6 +842,32 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmp(input, Immediate(i.InputInt32(index + 0)));
+ __ j(equal, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (size_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
+ __ cmp(input, Immediate(case_count));
+ __ j(above_equal, GetLabel(i.InputRpo(1)));
+ __ jmp(Operand::JumpTable(input, times_4, table));
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
@@ -1006,8 +1005,7 @@ void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- Frame* frame = this->frame();
- int stack_slots = frame->GetSpillSlotCount();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
// Assemble a prologue similar the to cdecl calling convention.
__ push(ebp);
@@ -1020,19 +1018,37 @@ void CodeGenerator::AssemblePrologue() {
__ push(Register::from_code(i));
register_save_area_size += kPointerSize;
}
- frame->SetRegisterSaveAreaSize(register_save_area_size);
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
+ // TODO(turbofan): this prologue is redundant with OSR, but needed for
+ // code aging.
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame->SetRegisterSaveAreaSize(
+ frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ } else if (stack_slots > 0) {
__ StubPrologue();
- frame->SetRegisterSaveAreaSize(
+ frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
if (stack_slots > 0) {
+ // Allocate the stack slots used by this frame.
__ sub(esp, Immediate(stack_slots * kPointerSize));
}
}
@@ -1040,11 +1056,11 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
const RegList saves = descriptor->CalleeSavedRegisters();
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ add(esp, Immediate(stack_slots * kPointerSize));
}
@@ -1063,13 +1079,15 @@ void CodeGenerator::AssembleReturn() {
__ pop(ebp); // Pop caller's frame pointer.
__ ret(0);
}
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ ret(pop_count * kPointerSize);
+ } else {
+ __ ret(0);
}
}
@@ -1230,6 +1248,13 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dd(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 16063ab43b..beec701903 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -4,7 +4,7 @@
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -16,7 +16,7 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
explicit IA32OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseByteRegister(Node* node) {
+ InstructionOperand UseByteRegister(Node* node) {
// TODO(dcarney): relax constraint.
return UseFixed(node, edx);
}
@@ -31,7 +31,8 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
- return !isolate()->heap()->InNewSpace(*value.handle());
+ Isolate* isolate = value.handle()->GetIsolate();
+ return !isolate->heap()->InNewSpace(*value.handle());
}
default:
return false;
@@ -40,7 +41,7 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
Node* displacement_node,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == NULL)
@@ -98,7 +99,7 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
}
AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
@@ -156,9 +157,9 @@ void InstructionSelector::VisitLoad(Node* node) {
}
IA32OperandGenerator g(this);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand* inputs[3];
+ InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
@@ -180,8 +181,8 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
- Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
+ InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
+ Emit(kIA32StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
temps);
return;
@@ -212,7 +213,7 @@ void InstructionSelector::VisitStore(Node* node) {
return;
}
- InstructionOperand* val;
+ InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
} else if (rep == kRepWord8 || rep == kRepBit) {
@@ -221,13 +222,13 @@ void InstructionSelector::VisitStore(Node* node) {
val = g.UseRegister(value);
}
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
+ Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
@@ -259,8 +260,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand =
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -302,20 +303,20 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* value_operand =
+ InstructionOperand value_operand =
g.CanBeImmediate(value)
? g.UseImmediate(value)
: ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
: g.UseRegister(value));
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand =
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
offset_operand, length_operand, value_operand, offset_operand,
g.UseImmediate(buffer));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+ Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
offset_operand, length_operand, value_operand, g.UseRegister(buffer),
offset_operand);
}
@@ -329,9 +330,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
@@ -343,7 +344,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
// mov eax, [ebp-0x10]
// add eax, [ebp-0x10]
// jo label
- InstructionOperand* const input = g.UseRegister(left);
+ InstructionOperand const input = g.UseRegister(left);
inputs[input_count++] = input;
inputs[input_count++] = input;
} else if (g.CanBeImmediate(right)) {
@@ -369,8 +370,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
@@ -439,7 +440,7 @@ void VisitMulHigh(InstructionSelector* selector, Node* node,
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand* temps[] = {g.TempRegister(edx)};
+ InstructionOperand temps[] = {g.TempRegister(edx)};
selector->Emit(opcode, g.DefineAsFixed(node, eax),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
@@ -456,7 +457,7 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
void EmitLea(InstructionSelector* selector, Node* result, Node* index,
int scale, Node* base, Node* displacement) {
IA32OperandGenerator g(selector);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
@@ -464,7 +465,7 @@ void EmitLea(InstructionSelector* selector, Node* result, Node* index,
DCHECK_NE(0, static_cast<int>(input_count));
DCHECK_GE(arraysize(inputs), input_count);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(result);
InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
@@ -509,7 +510,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
(m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
@@ -517,7 +518,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
DCHECK_NE(0, static_cast<int>(input_count));
DCHECK_GE(arraysize(inputs), input_count);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
@@ -681,7 +682,7 @@ void InstructionSelector::VisitFloat64Div(Node* node) {
void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand* temps[] = {g.TempRegister(eax)};
+ InstructionOperand temps[] = {g.TempRegister(eax)};
Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
temps);
@@ -734,11 +735,14 @@ void InstructionSelector::VisitCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
+ for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
+ ++i) {
// TODO(titzer): handle pushing double parameters.
- Emit(kIA32Push, NULL,
- g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+ InstructionOperand value =
+ g.CanBeImmediate(*i) ? g.UseImmediate(*i) : IsSupported(ATOM)
+ ? g.UseRegister(*i)
+ : g.Use(*i);
+ Emit(kIA32Push, g.NoOutput(), value);
}
// Select the appropriate opcode based on the call type.
@@ -758,7 +762,7 @@ void InstructionSelector::VisitCall(Node* node) {
opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
- InstructionOperand** first_output =
+ InstructionOperand* first_output =
buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), first_output,
@@ -771,11 +775,11 @@ namespace {
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(opcode), NULL, left, right,
+ selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
@@ -799,11 +803,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
-// Shared routine for multiple float compare operations.
+// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
- cont, node->op()->HasProperty(Operator::kCommutative));
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+ VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
@@ -868,22 +873,22 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -923,6 +928,67 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ IA32OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+ InstructionOperand default_operand = g.Label(default_branch);
+
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+ // is 2^31-1, so don't assume that it's non-zero below.
+ size_t value_range =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+ // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+ // instruction.
+ size_t table_space_cost = 4 + value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * case_count;
+ size_t lookup_time_cost = case_count;
+ if (case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (min_value) {
+ index_operand = g.TempRegister();
+ Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-min_value));
+ }
+ size_t input_count = 2 + value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < case_count; ++index) {
+ size_t value = case_values[index] - min_value;
+ BasicBlock* branch = case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+ return;
+ }
+
+ // Generate a sequence of conditional jumps.
+ size_t input_count = 2 + case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = default_operand;
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t value = case_values[index];
+ BasicBlock* branch = case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -958,7 +1024,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kIA32Add, &cont);
}
@@ -968,7 +1034,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kIA32Sub, &cont);
}
@@ -984,13 +1050,13 @@ void InstructionSelector::VisitFloat64Equal(Node* node) {
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
@@ -1006,6 +1072,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
}
return MachineOperatorBuilder::Flag::kNoFlags;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/ia32/linkage-ia32.cc b/deps/v8/src/compiler/ia32/linkage-ia32.cc
index 12cc34f3b9..19dbc43f20 100644
--- a/deps/v8/src/compiler/ia32/linkage-ia32.cc
+++ b/deps/v8/src/compiler/ia32/linkage-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
@@ -30,30 +28,32 @@ struct IA32LinkageHelperTraits {
typedef LinkageHelper<IA32LinkageHelperTraits> LH;
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
+ const MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index ea1785417e..ef1e942ed4 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -19,6 +19,8 @@
#include "src/compiler/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/x64/instruction-codes-x64.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/compiler/ppc/instruction-codes-ppc.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
@@ -35,6 +37,8 @@ namespace compiler {
V(ArchCallCodeObject) \
V(ArchCallJSFunction) \
V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
V(ArchNop) \
V(ArchRet) \
V(ArchStackPointer) \
@@ -102,10 +106,6 @@ enum FlagsCondition {
kUnsignedGreaterThan,
kUnorderedEqual,
kUnorderedNotEqual,
- kUnorderedLessThan,
- kUnorderedGreaterThanOrEqual,
- kUnorderedLessThanOrEqual,
- kUnorderedGreaterThan,
kOverflow,
kNotOverflow
};
@@ -129,7 +129,7 @@ typedef int32_t InstructionCode;
typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
typedef BitField<AddressingMode, 7, 5> AddressingModeField;
typedef BitField<FlagsMode, 12, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 14, 5> FlagsConditionField;
+typedef BitField<FlagsCondition, 14, 4> FlagsConditionField;
typedef BitField<int, 14, 18> MiscField;
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index bdcd952b5f..90898ba947 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -21,132 +21,139 @@ class OperandGenerator {
explicit OperandGenerator(InstructionSelector* selector)
: selector_(selector) {}
- InstructionOperand* DefineAsRegister(Node* node) {
- return Define(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ InstructionOperand NoOutput() {
+ return InstructionOperand(); // Generates an invalid operand.
}
- InstructionOperand* DefineSameAsFirst(Node* result) {
- return Define(result, new (zone())
- UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
+ InstructionOperand DefineAsRegister(Node* node) {
+ return Define(node,
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ GetVReg(node)));
}
- InstructionOperand* DefineAsFixed(Node* node, Register reg) {
- return Define(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg)));
+ InstructionOperand DefineSameAsFirst(Node* node) {
+ return Define(node,
+ UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT,
+ GetVReg(node)));
}
- InstructionOperand* DefineAsFixed(Node* node, DoubleRegister reg) {
- return Define(node, new (zone())
+ InstructionOperand DefineAsFixed(Node* node, Register reg) {
+ return Define(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg),
+ GetVReg(node)));
+ }
+
+ InstructionOperand DefineAsFixed(Node* node, DoubleRegister reg) {
+ return Define(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg)));
+ DoubleRegister::ToAllocationIndex(reg),
+ GetVReg(node)));
}
- InstructionOperand* DefineAsConstant(Node* node) {
+ InstructionOperand DefineAsConstant(Node* node) {
selector()->MarkAsDefined(node);
- int virtual_register = selector_->GetVirtualRegister(node);
+ int virtual_register = GetVReg(node);
sequence()->AddConstant(virtual_register, ToConstant(node));
- return ConstantOperand::Create(virtual_register, zone());
+ return ConstantOperand(virtual_register);
}
- InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Define(node, ToUnallocatedOperand(location, type));
+ InstructionOperand DefineAsLocation(Node* node, LinkageLocation location,
+ MachineType type) {
+ return Define(node, ToUnallocatedOperand(location, type, GetVReg(node)));
}
- InstructionOperand* Use(Node* node) {
- return Use(
- node, new (zone()) UnallocatedOperand(
- UnallocatedOperand::NONE, UnallocatedOperand::USED_AT_START));
+ InstructionOperand Use(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::NONE,
+ UnallocatedOperand::USED_AT_START,
+ GetVReg(node)));
}
- InstructionOperand* UseRegister(Node* node) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START));
+ InstructionOperand UseRegister(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START,
+ GetVReg(node)));
}
// Use register or operand for the node. If a register is chosen, it won't
// alias any temporary or output registers.
- InstructionOperand* UseUnique(Node* node) {
- return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::NONE));
+ InstructionOperand UseUnique(Node* node) {
+ return Use(node,
+ UnallocatedOperand(UnallocatedOperand::NONE, GetVReg(node)));
}
// Use a unique register for the node that does not alias any temporary or
// output registers.
- InstructionOperand* UseUniqueRegister(Node* node) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ InstructionOperand UseUniqueRegister(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ GetVReg(node)));
}
- InstructionOperand* UseFixed(Node* node, Register reg) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg)));
+ InstructionOperand UseFixed(Node* node, Register reg) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg),
+ GetVReg(node)));
}
- InstructionOperand* UseFixed(Node* node, DoubleRegister reg) {
- return Use(node, new (zone())
+ InstructionOperand UseFixed(Node* node, DoubleRegister reg) {
+ return Use(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg)));
+ DoubleRegister::ToAllocationIndex(reg),
+ GetVReg(node)));
}
- InstructionOperand* UseImmediate(Node* node) {
+ InstructionOperand UseImmediate(Node* node) {
int index = sequence()->AddImmediate(ToConstant(node));
- return ImmediateOperand::Create(index, zone());
+ return ImmediateOperand(index);
}
- InstructionOperand* UseLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Use(node, ToUnallocatedOperand(location, type));
+ InstructionOperand UseLocation(Node* node, LinkageLocation location,
+ MachineType type) {
+ return Use(node, ToUnallocatedOperand(location, type, GetVReg(node)));
}
- InstructionOperand* TempRegister() {
- UnallocatedOperand* op =
- new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START);
- op->set_virtual_register(sequence()->NextVirtualRegister());
- return op;
+ InstructionOperand TempRegister() {
+ return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START,
+ sequence()->NextVirtualRegister());
}
- InstructionOperand* TempDoubleRegister() {
- UnallocatedOperand* op =
- new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START);
- op->set_virtual_register(sequence()->NextVirtualRegister());
- sequence()->MarkAsDouble(op->virtual_register());
+ InstructionOperand TempDoubleRegister() {
+ UnallocatedOperand op = UnallocatedOperand(
+ UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
+ sequence()->MarkAsDouble(op.virtual_register());
return op;
}
- InstructionOperand* TempRegister(Register reg) {
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ InstructionOperand TempRegister(Register reg) {
+ return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg),
+ InstructionOperand::kInvalidVirtualRegister);
}
- InstructionOperand* TempImmediate(int32_t imm) {
+ InstructionOperand TempImmediate(int32_t imm) {
int index = sequence()->AddImmediate(Constant(imm));
- return ImmediateOperand::Create(index, zone());
+ return ImmediateOperand(index);
}
- InstructionOperand* TempLocation(LinkageLocation location, MachineType type) {
- UnallocatedOperand* op = ToUnallocatedOperand(location, type);
- op->set_virtual_register(sequence()->NextVirtualRegister());
- return op;
+ InstructionOperand TempLocation(LinkageLocation location, MachineType type) {
+ return ToUnallocatedOperand(location, type,
+ sequence()->NextVirtualRegister());
}
- InstructionOperand* Label(BasicBlock* block) {
+ InstructionOperand Label(BasicBlock* block) {
int index = sequence()->AddImmediate(Constant(block->GetRpoNumber()));
- return ImmediateOperand::Create(index, zone());
+ return ImmediateOperand(index);
}
protected:
InstructionSelector* selector() const { return selector_; }
InstructionSequence* sequence() const { return selector()->sequence(); }
- Isolate* isolate() const { return zone()->isolate(); }
Zone* zone() const { return selector()->instruction_zone(); }
private:
+ int GetVReg(Node* node) const { return selector_->GetVirtualRegister(node); }
+
static Constant ToConstant(const Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -169,38 +176,47 @@ class OperandGenerator {
return Constant(static_cast<int32_t>(0));
}
- UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
+ UnallocatedOperand Define(Node* node, UnallocatedOperand operand) {
DCHECK_NOT_NULL(node);
- DCHECK_NOT_NULL(operand);
- operand->set_virtual_register(selector_->GetVirtualRegister(node));
+ DCHECK_EQ(operand.virtual_register(), GetVReg(node));
selector()->MarkAsDefined(node);
return operand;
}
- UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
+ UnallocatedOperand Use(Node* node, UnallocatedOperand operand) {
DCHECK_NOT_NULL(node);
- DCHECK_NOT_NULL(operand);
- operand->set_virtual_register(selector_->GetVirtualRegister(node));
+ DCHECK_EQ(operand.virtual_register(), GetVReg(node));
selector()->MarkAsUsed(node);
return operand;
}
- UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location,
- MachineType type) {
+ UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
+ MachineType type,
+ int virtual_register) {
if (location.location_ == LinkageLocation::ANY_REGISTER) {
- return new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+ // any machine register.
+ return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ virtual_register);
}
if (location.location_ < 0) {
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
- location.location_);
+ // a location on the caller frame.
+ return UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+ location.location_, virtual_register);
}
+ if (location.location_ > LinkageLocation::ANY_REGISTER) {
+ // a spill location on this (callee) frame.
+ return UnallocatedOperand(
+ UnallocatedOperand::FIXED_SLOT,
+ location.location_ - LinkageLocation::ANY_REGISTER - 1,
+ virtual_register);
+ }
+ // a fixed register.
if (RepresentationOf(type) == kRepFloat64) {
- return new (zone()) UnallocatedOperand(
- UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
+ return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ location.location_, virtual_register);
}
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- location.location_);
+ return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ location.location_, virtual_register);
}
InstructionSelector* selector_;
@@ -294,18 +310,6 @@ class FlagsContinuation FINAL {
case kUnorderedEqual:
case kUnorderedNotEqual:
return;
- case kUnorderedLessThan:
- condition_ = kUnorderedGreaterThan;
- return;
- case kUnorderedGreaterThanOrEqual:
- condition_ = kUnorderedLessThanOrEqual;
- return;
- case kUnorderedLessThanOrEqual:
- condition_ = kUnorderedGreaterThanOrEqual;
- return;
- case kUnorderedGreaterThan:
- condition_ = kUnorderedLessThan;
- return;
}
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index ffb8f9fa8d..41b957a691 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -4,67 +4,67 @@
#include "src/compiler/instruction-selector.h"
-#include "src/compiler/graph.h"
+#include <limits>
+
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
namespace v8 {
namespace internal {
namespace compiler {
-InstructionSelector::InstructionSelector(Zone* local_zone, Graph* graph,
+InstructionSelector::InstructionSelector(Zone* zone, size_t node_count,
Linkage* linkage,
InstructionSequence* sequence,
Schedule* schedule,
SourcePositionTable* source_positions,
Features features)
- : zone_(local_zone),
+ : zone_(zone),
linkage_(linkage),
sequence_(sequence),
source_positions_(source_positions),
features_(features),
schedule_(schedule),
- node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
current_block_(NULL),
- instructions_(zone()),
- defined_(graph->NodeCount(), false, zone()),
- used_(graph->NodeCount(), false, zone()) {}
+ instructions_(zone),
+ defined_(node_count, false, zone),
+ used_(node_count, false, zone),
+ virtual_registers_(node_count,
+ InstructionOperand::kInvalidVirtualRegister, zone) {
+ instructions_.reserve(node_count);
+}
void InstructionSelector::SelectInstructions() {
// Mark the inputs of all phis in loop headers as used.
BasicBlockVector* blocks = schedule()->rpo_order();
- for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
- BasicBlock* block = *i;
+ for (auto const block : *blocks) {
if (!block->IsLoopHeader()) continue;
- DCHECK_NE(0, static_cast<int>(block->PredecessorCount()));
- DCHECK_NE(1, static_cast<int>(block->PredecessorCount()));
- for (BasicBlock::const_iterator j = block->begin(); j != block->end();
- ++j) {
- Node* phi = *j;
+ DCHECK_LE(2u, block->PredecessorCount());
+ for (Node* const phi : *block) {
if (phi->opcode() != IrOpcode::kPhi) continue;
// Mark all inputs as used.
- for (Node* const k : phi->inputs()) {
- MarkAsUsed(k);
+ for (Node* const input : phi->inputs()) {
+ MarkAsUsed(input);
}
}
}
// Visit each basic block in post order.
- for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+ for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
VisitBlock(*i);
}
// Schedule the selected instructions.
- for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
- BasicBlock* block = *i;
+ for (auto const block : *blocks) {
InstructionBlock* instruction_block =
sequence()->InstructionBlockAt(block->GetRpoNumber());
size_t end = instruction_block->code_end();
size_t start = instruction_block->code_start();
+ DCHECK_LE(end, start);
sequence()->StartBlock(block->GetRpoNumber());
while (start-- > end) {
sequence()->AddInstruction(instructions_[start]);
@@ -75,30 +75,30 @@ void InstructionSelector::SelectInstructions() {
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
+ InstructionOperand output,
size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand output,
+ InstructionOperand a, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a,
- InstructionOperand* b, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b};
+ InstructionOperand output,
+ InstructionOperand a,
+ InstructionOperand b, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -106,13 +106,13 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a,
- InstructionOperand* b,
- InstructionOperand* c, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c};
+ InstructionOperand output,
+ InstructionOperand a,
+ InstructionOperand b,
+ InstructionOperand c, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -120,11 +120,11 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- size_t temp_count, InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ size_t temp_count, InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -132,11 +132,11 @@ Instruction* InstructionSelector::Emit(
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, size_t temp_count, InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d, e};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d, e};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -144,12 +144,12 @@ Instruction* InstructionSelector::Emit(
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, InstructionOperand* f, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d, e, f};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, InstructionOperand f, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d, e, f};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -157,9 +157,9 @@ Instruction* InstructionSelector::Emit(
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
- size_t input_count, InstructionOperand** inputs, size_t temp_count,
- InstructionOperand** temps) {
+ InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs, size_t temp_count,
+ InstructionOperand* temps) {
Instruction* instr =
Instruction::New(instruction_zone(), opcode, output_count, outputs,
input_count, inputs, temp_count, temps);
@@ -180,58 +180,70 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const {
int InstructionSelector::GetVirtualRegister(const Node* node) {
- if (node_map_[node->id()] == kNodeUnmapped) {
- node_map_[node->id()] = sequence()->NextVirtualRegister();
+ DCHECK_NOT_NULL(node);
+ size_t const id = node->id();
+ DCHECK_LT(id, virtual_registers_.size());
+ int virtual_register = virtual_registers_[id];
+ if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
+ virtual_register = sequence()->NextVirtualRegister();
+ virtual_registers_[id] = virtual_register;
}
- return node_map_[node->id()];
+ return virtual_register;
}
-int InstructionSelector::GetMappedVirtualRegister(const Node* node) const {
- return node_map_[node->id()];
+const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
+ const {
+ std::map<NodeId, int> virtual_registers;
+ for (size_t n = 0; n < virtual_registers_.size(); ++n) {
+ if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
+ NodeId const id = static_cast<NodeId>(n);
+ virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
+ }
+ }
+ return virtual_registers;
}
bool InstructionSelector::IsDefined(Node* node) const {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(defined_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, defined_.size());
return defined_[id];
}
void InstructionSelector::MarkAsDefined(Node* node) {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(defined_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, defined_.size());
defined_[id] = true;
}
bool InstructionSelector::IsUsed(Node* node) const {
+ DCHECK_NOT_NULL(node);
if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(used_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, used_.size());
return used_[id];
}
void InstructionSelector::MarkAsUsed(Node* node) {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(used_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, used_.size());
used_[id] = true;
}
bool InstructionSelector::IsDouble(const Node* node) const {
DCHECK_NOT_NULL(node);
- int virtual_register = GetMappedVirtualRegister(node);
- if (virtual_register == kNodeUnmapped) return false;
+ int const virtual_register = virtual_registers_[node->id()];
+ if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
+ return false;
+ }
return sequence()->IsDouble(virtual_register);
}
@@ -245,8 +257,10 @@ void InstructionSelector::MarkAsDouble(Node* node) {
bool InstructionSelector::IsReference(const Node* node) const {
DCHECK_NOT_NULL(node);
- int virtual_register = GetMappedVirtualRegister(node);
- if (virtual_register == kNodeUnmapped) return false;
+ int const virtual_register = virtual_registers_[node->id()];
+ if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
+ return false;
+ }
return sequence()->IsReference(virtual_register);
}
@@ -259,15 +273,15 @@ void InstructionSelector::MarkAsReference(Node* node) {
void InstructionSelector::MarkAsRepresentation(MachineType rep,
- InstructionOperand* op) {
- UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
+ const InstructionOperand& op) {
+ UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
switch (RepresentationOf(rep)) {
case kRepFloat32:
case kRepFloat64:
- sequence()->MarkAsDouble(unalloc->virtual_register());
+ sequence()->MarkAsDouble(unalloc.virtual_register());
break;
case kRepTagged:
- sequence()->MarkAsReference(unalloc->virtual_register());
+ sequence()->MarkAsReference(unalloc.virtual_register());
break;
default:
break;
@@ -325,8 +339,14 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (buffer->descriptor->ReturnCount() == 1) {
buffer->output_nodes.push_back(call);
} else {
- buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
- call->CollectProjections(&buffer->output_nodes);
+ buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
+ for (auto use : call->uses()) {
+ if (use->opcode() != IrOpcode::kProjection) continue;
+ size_t const index = ProjectionIndexOf(use->op());
+ DCHECK_LT(index, buffer->output_nodes.size());
+ DCHECK(!buffer->output_nodes[index]);
+ buffer->output_nodes[index] = use;
+ }
}
// Filter out the outputs that aren't live because no projection uses them.
@@ -345,7 +365,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->descriptor->GetReturnLocation(static_cast<int>(i));
Node* output = buffer->output_nodes[i];
- InstructionOperand* op =
+ InstructionOperand op =
output == NULL ? g.TempLocation(location, type)
: g.DefineAsLocation(output, location, type);
MarkAsRepresentation(type, op);
@@ -409,15 +429,15 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
DCHECK(iter != call->inputs().end());
DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
if (index == 0) continue; // The first argument (callee) is already done.
- InstructionOperand* op =
+ InstructionOperand op =
g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
buffer->descriptor->GetInputType(index));
- if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
- int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+ if (UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
+ int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
buffer->pushed_nodes.resize(stack_index + 1, NULL);
}
- DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
+ DCHECK(!buffer->pushed_nodes[stack_index]);
buffer->pushed_nodes[stack_index] = *iter;
pushed_count++;
} else {
@@ -432,7 +452,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
void InstructionSelector::VisitBlock(BasicBlock* block) {
- DCHECK_EQ(NULL, current_block_);
+ DCHECK(!current_block_);
current_block_ = block;
int current_block_end = static_cast<int>(instructions_.size());
@@ -465,7 +485,9 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
}
-static inline void CheckNoPhis(const BasicBlock* block) {
+namespace {
+
+V8_INLINE void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
// Branch targets should not have phis.
for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
@@ -475,6 +497,8 @@ static inline void CheckNoPhis(const BasicBlock* block) {
#endif
}
+} // namespace
+
void InstructionSelector::VisitControl(BasicBlock* block) {
Node* input = block->control_input();
@@ -490,8 +514,41 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
CheckNoPhis(tbranch);
CheckNoPhis(fbranch);
if (tbranch == fbranch) return VisitGoto(tbranch);
+ // Treat special Branch(Always, IfTrue, IfFalse) as Goto(IfTrue).
+ Node* const condition = input->InputAt(0);
+ if (condition->opcode() == IrOpcode::kAlways) return VisitGoto(tbranch);
return VisitBranch(input, tbranch, fbranch);
}
+ case BasicBlock::kSwitch: {
+ DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
+ // Last successor must be Default.
+ BasicBlock* default_branch = block->successors().back();
+ DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ CheckNoPhis(default_branch);
+ // All other successors must be cases.
+ size_t case_count = block->SuccessorCount() - 1;
+ DCHECK_LE(1u, case_count);
+ BasicBlock** case_branches = &block->successors().front();
+ // Determine case values and their min/max.
+ int32_t* case_values = zone()->NewArray<int32_t>(case_count);
+ int32_t min_value = std::numeric_limits<int32_t>::max();
+ int32_t max_value = std::numeric_limits<int32_t>::min();
+ for (size_t index = 0; index < case_count; ++index) {
+ BasicBlock* branch = case_branches[index];
+ int32_t value = OpParameter<int32_t>(branch->front()->op());
+ case_values[index] = value;
+ if (min_value > value) min_value = value;
+ if (max_value < value) max_value = value;
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ CheckNoPhis(branch);
+ }
+ DCHECK_LE(min_value, max_value);
+ return VisitSwitch(input, default_branch, case_branches, case_values,
+ case_count, min_value, max_value);
+ }
case BasicBlock::kReturn: {
// If the result itself is a return, return its input.
Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
@@ -500,10 +557,11 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
return VisitReturn(value);
}
case BasicBlock::kThrow:
- return VisitThrow(input);
+ DCHECK_EQ(IrOpcode::kThrow, input->opcode());
+ return VisitThrow(input->InputAt(0));
case BasicBlock::kNone: {
// TODO(titzer): exit block doesn't have control.
- DCHECK(input == NULL);
+ DCHECK_NULL(input);
break;
}
default:
@@ -522,15 +580,20 @@ MachineType InstructionSelector::GetMachineType(Node* node) {
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kSwitch:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
+ case IrOpcode::kEffectSet:
case IrOpcode::kMerge:
- case IrOpcode::kTerminate:
// No code needed for these graph artifacts.
return kMachNone;
case IrOpcode::kFinish:
return kMachAnyTagged;
case IrOpcode::kParameter:
return linkage()->GetParameterType(OpParameter<int>(node));
+ case IrOpcode::kOsrValue:
+ return kMachAnyTagged;
case IrOpcode::kPhi:
return OpParameter<MachineType>(node);
case IrOpcode::kProjection:
@@ -658,6 +721,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kSwitch:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
// No code needed for these graph artifacts.
@@ -669,6 +735,8 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type, node);
return VisitParameter(node);
}
+ case IrOpcode::kOsrValue:
+ return MarkAsReference(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
MachineType type = OpParameter<MachineType>(node);
MarkAsRepresentation(type, node);
@@ -685,9 +753,12 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kFloat64Constant:
return MarkAsDouble(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
- case IrOpcode::kNumberConstant:
- // TODO(turbofan): only mark non-smis as references.
return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kNumberConstant: {
+ double value = OpParameter<double>(node);
+ if (!IsSmiDouble(value)) MarkAsReference(node);
+ return VisitConstant(node);
+ }
case IrOpcode::kCall:
return VisitCall(node);
case IrOpcode::kFrameState:
@@ -953,6 +1024,14 @@ void InstructionSelector::VisitParameter(Node* node) {
}
+void InstructionSelector::VisitOsrValue(Node* node) {
+ OperandGenerator g(this);
+ int index = OpParameter<int>(node);
+ Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
+ kMachAnyTagged));
+}
+
+
void InstructionSelector::VisitPhi(Node* node) {
const int input_count = node->op()->ValueInputCount();
PhiInstruction* phi = new (instruction_zone())
@@ -962,7 +1041,7 @@ void InstructionSelector::VisitPhi(Node* node) {
for (int i = 0; i < input_count; ++i) {
Node* const input = node->InputAt(i);
MarkAsUsed(input);
- phi->Extend(instruction_zone(), GetVirtualRegister(input));
+ phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
}
}
@@ -973,10 +1052,10 @@ void InstructionSelector::VisitProjection(Node* node) {
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
- if (OpParameter<size_t>(node) == 0) {
+ if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
- DCHECK(OpParameter<size_t>(node) == 1u);
+ DCHECK(ProjectionIndexOf(node->op()) == 1u);
MarkAsUsed(value);
}
break;
@@ -997,23 +1076,25 @@ void InstructionSelector::VisitConstant(Node* node) {
void InstructionSelector::VisitGoto(BasicBlock* target) {
// jump to the next block.
OperandGenerator g(this);
- Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+ Emit(kArchJmp, g.NoOutput(), g.Label(target))->MarkAsControl();
}
void InstructionSelector::VisitReturn(Node* value) {
OperandGenerator g(this);
if (value != NULL) {
- Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
- linkage()->GetReturnType()));
+ Emit(kArchRet, g.NoOutput(),
+ g.UseLocation(value, linkage()->GetReturnLocation(),
+ linkage()->GetReturnType()));
} else {
- Emit(kArchRet, NULL);
+ Emit(kArchRet, g.NoOutput());
}
}
void InstructionSelector::VisitThrow(Node* value) {
- UNIMPLEMENTED(); // TODO(titzer)
+ OperandGenerator g(this);
+ Emit(kArchNop, g.NoOutput()); // TODO(titzer)
}
@@ -1052,7 +1133,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
}
-static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
+static InstructionOperand UseOrImmediate(OperandGenerator* g, Node* input) {
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
@@ -1133,6 +1214,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ UNIMPLEMENTED();
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 5e3c52f959..5c31db74e9 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -5,11 +5,12 @@
#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
-#include <deque>
+#include <map>
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -21,17 +22,17 @@ struct CallBuffer; // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
-typedef IntVector NodeToVregMap;
+typedef ZoneVector<InstructionOperand> InstructionOperandVector;
+
+
+// Instruction selection generates an InstructionSequence for a given Schedule.
class InstructionSelector FINAL {
public:
- static const int kNodeUnmapped = -1;
-
// Forward declarations.
class Features;
- // TODO(dcarney): pass in vreg mapping instead of graph.
- InstructionSelector(Zone* local_zone, Graph* graph, Linkage* linkage,
+ InstructionSelector(Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions,
Features features = SupportedFeatures());
@@ -43,36 +44,36 @@ class InstructionSelector FINAL {
// ============= Architecture-independent code emission methods. =============
// ===========================================================================
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, InstructionOperand* d,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, InstructionOperand* f,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ size_t temp_count = 0, InstructionOperand* temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, size_t temp_count = 0,
+ InstructionOperand* temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ size_t temp_count = 0, InstructionOperand* temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, size_t temp_count = 0,
+ InstructionOperand* temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, InstructionOperand d,
+ size_t temp_count = 0, InstructionOperand* temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, size_t temp_count = 0,
+ InstructionOperand* temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, InstructionOperand f,
+ size_t temp_count = 0, InstructionOperand* temps = NULL);
Instruction* Emit(InstructionCode opcode, size_t output_count,
- InstructionOperand** outputs, size_t input_count,
- InstructionOperand** inputs, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
+ InstructionOperand* outputs, size_t input_count,
+ InstructionOperand* inputs, size_t temp_count = 0,
+ InstructionOperand* temps = NULL);
Instruction* Emit(Instruction* instr);
// ===========================================================================
@@ -126,9 +127,7 @@ class InstructionSelector FINAL {
bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
int GetVirtualRegister(const Node* node);
- // Gets the current mapping if it exists, kNodeUnmapped otherwise.
- int GetMappedVirtualRegister(const Node* node) const;
- const NodeToVregMap& GetNodeMapForTesting() const { return node_map_; }
+ const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
private:
friend class OperandGenerator;
@@ -158,7 +157,7 @@ class InstructionSelector FINAL {
// Inform the register allocation of the representation of the unallocated
// operand {op}.
- void MarkAsRepresentation(MachineType rep, InstructionOperand* op);
+ void MarkAsRepresentation(MachineType rep, const InstructionOperand& op);
// Initialize the call buffer with the InstructionOperands, nodes, etc,
// corresponding
@@ -196,12 +195,16 @@ class InstructionSelector FINAL {
void VisitFinish(Node* node);
void VisitParameter(Node* node);
+ void VisitOsrValue(Node* node);
void VisitPhi(Node* node);
void VisitProjection(Node* node);
void VisitConstant(Node* node);
void VisitCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
+ void VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches, int32_t* case_values,
+ size_t case_count, int32_t min_value, int32_t max_value);
void VisitReturn(Node* value);
void VisitThrow(Node* value);
void VisitDeoptimize(Node* deopt);
@@ -222,11 +225,11 @@ class InstructionSelector FINAL {
SourcePositionTable* const source_positions_;
Features features_;
Schedule* const schedule_;
- NodeToVregMap node_map_;
BasicBlock* current_block_;
- ZoneDeque<Instruction*> instructions_;
+ ZoneVector<Instruction*> instructions_;
BoolVector defined_;
BoolVector used_;
+ IntVector virtual_registers_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index f83cdebede..ebd8125848 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -50,50 +50,14 @@ std::ostream& operator<<(std::ostream& os,
return os << "[" << conf->general_register_name(op.index()) << "|R]";
case InstructionOperand::DOUBLE_REGISTER:
return os << "[" << conf->double_register_name(op.index()) << "|R]";
+ case InstructionOperand::INVALID:
+ return os << "(x)";
}
UNREACHABLE();
return os;
}
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-SubKindOperand<kOperandKind, kNumCachedOperands>*
- SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
-
-
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
- if (cache) return;
- cache = new SubKindOperand[kNumCachedOperands];
- for (int i = 0; i < kNumCachedOperands; i++) {
- cache[i].ConvertTo(kOperandKind, i);
- }
-}
-
-
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
- delete[] cache;
- cache = NULL;
-}
-
-
-void InstructionOperand::SetUpCaches() {
-#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
- name##Operand::SetUpCache();
- INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
-#undef INSTRUCTION_OPERAND_SETUP
-}
-
-
-void InstructionOperand::TearDownCaches() {
-#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
- name##Operand::TearDownCache();
- INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
-#undef INSTRUCTION_OPERAND_TEARDOWN
-}
-
-
std::ostream& operator<<(std::ostream& os,
const PrintableMoveOperands& printable) {
const MoveOperands& mo = *printable.move_operands_;
@@ -117,6 +81,40 @@ bool ParallelMove::IsRedundant() const {
}
+Instruction::Instruction(InstructionCode opcode)
+ : opcode_(opcode),
+ bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
+ TempCountField::encode(0) | IsCallField::encode(false) |
+ IsControlField::encode(false)),
+ pointer_map_(NULL) {}
+
+
+Instruction::Instruction(InstructionCode opcode, size_t output_count,
+ InstructionOperand* outputs, size_t input_count,
+ InstructionOperand* inputs, size_t temp_count,
+ InstructionOperand* temps)
+ : opcode_(opcode),
+ bit_field_(OutputCountField::encode(output_count) |
+ InputCountField::encode(input_count) |
+ TempCountField::encode(temp_count) |
+ IsCallField::encode(false) | IsControlField::encode(false)),
+ pointer_map_(NULL) {
+ size_t offset = 0;
+ for (size_t i = 0; i < output_count; ++i) {
+ DCHECK(!outputs[i].IsInvalid());
+ operands_[offset++] = outputs[i];
+ }
+ for (size_t i = 0; i < input_count; ++i) {
+ DCHECK(!inputs[i].IsInvalid());
+ operands_[offset++] = inputs[i];
+ }
+ for (size_t i = 0; i < temp_count; ++i) {
+ DCHECK(!temps[i].IsInvalid());
+ operands_[offset++] = temps[i];
+ }
+}
+
+
bool GapInstruction::IsRedundant() const {
for (int i = GapInstruction::FIRST_INNER_POSITION;
i <= GapInstruction::LAST_INNER_POSITION; i++) {
@@ -252,14 +250,6 @@ std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
return os << "unordered equal";
case kUnorderedNotEqual:
return os << "unordered not equal";
- case kUnorderedLessThan:
- return os << "unordered less than";
- case kUnorderedGreaterThanOrEqual:
- return os << "unordered greater than or equal";
- case kUnorderedLessThanOrEqual:
- return os << "unordered less than or equal";
- case kUnorderedGreaterThan:
- return os << "unordered greater than";
case kOverflow:
return os << "overflow";
case kNotOverflow:
@@ -287,7 +277,7 @@ std::ostream& operator<<(std::ostream& os,
if (instr.IsGapMoves()) {
const GapInstruction* gap = GapInstruction::cast(&instr);
- os << (instr.IsBlockStart() ? " block-start" : "gap ");
+ os << "gap ";
for (int i = GapInstruction::FIRST_INNER_POSITION;
i <= GapInstruction::LAST_INNER_POSITION; i++) {
os << "(";
@@ -347,6 +337,22 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
}
+PhiInstruction::PhiInstruction(Zone* zone, int virtual_register,
+ size_t input_count)
+ : virtual_register_(virtual_register),
+ output_(UnallocatedOperand(UnallocatedOperand::NONE, virtual_register)),
+ operands_(input_count, zone),
+ inputs_(input_count, zone) {}
+
+
+void PhiInstruction::SetInput(size_t offset, int virtual_register) {
+ DCHECK(inputs_[offset].IsInvalid());
+ auto input = UnallocatedOperand(UnallocatedOperand::ANY, virtual_register);
+ inputs_[offset] = input;
+ operands_[offset] = virtual_register;
+}
+
+
InstructionBlock::InstructionBlock(Zone* zone, BasicBlock::Id id,
BasicBlock::RpoNumber rpo_number,
BasicBlock::RpoNumber loop_header,
@@ -395,14 +401,12 @@ static InstructionBlock* InstructionBlockFor(Zone* zone,
GetLoopEndRpo(block), block->deferred());
// Map successors and precessors
instr_block->successors().reserve(block->SuccessorCount());
- for (auto it = block->successors_begin(); it != block->successors_end();
- ++it) {
- instr_block->successors().push_back((*it)->GetRpoNumber());
+ for (BasicBlock* successor : block->successors()) {
+ instr_block->successors().push_back(successor->GetRpoNumber());
}
instr_block->predecessors().reserve(block->PredecessorCount());
- for (auto it = block->predecessors_begin(); it != block->predecessors_end();
- ++it) {
- instr_block->predecessors().push_back((*it)->GetRpoNumber());
+ for (BasicBlock* predecessor : block->predecessors()) {
+ instr_block->predecessors().push_back(predecessor->GetRpoNumber());
}
return instr_block;
}
@@ -416,7 +420,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
size_t rpo_number = 0;
for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
- DCHECK_EQ(NULL, (*blocks)[rpo_number]);
+ DCHECK(!(*blocks)[rpo_number]);
DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number);
(*blocks)[rpo_number] = InstructionBlockFor(zone, *it);
}
@@ -440,9 +444,11 @@ void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
}
-InstructionSequence::InstructionSequence(Zone* instruction_zone,
+InstructionSequence::InstructionSequence(Isolate* isolate,
+ Zone* instruction_zone,
InstructionBlocks* instruction_blocks)
- : zone_(instruction_zone),
+ : isolate_(isolate),
+ zone_(instruction_zone),
instruction_blocks_(instruction_blocks),
block_starts_(zone()),
constants_(ConstantMap::key_compare(),
@@ -458,10 +464,17 @@ InstructionSequence::InstructionSequence(Zone* instruction_zone,
}
-BlockStartInstruction* InstructionSequence::GetBlockStart(
+int InstructionSequence::NextVirtualRegister() {
+ int virtual_register = next_virtual_register_++;
+ CHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
+ return virtual_register;
+}
+
+
+GapInstruction* InstructionSequence::GetBlockStart(
BasicBlock::RpoNumber rpo) const {
const InstructionBlock* block = InstructionBlockAt(rpo);
- return BlockStartInstruction::cast(InstructionAt(block->code_start()));
+ return GapInstruction::cast(InstructionAt(block->code_start()));
}
@@ -471,26 +484,26 @@ void InstructionSequence::StartBlock(BasicBlock::RpoNumber rpo) {
int code_start = static_cast<int>(instructions_.size());
block->set_code_start(code_start);
block_starts_.push_back(code_start);
- BlockStartInstruction* block_start = BlockStartInstruction::New(zone());
- AddInstruction(block_start);
}
void InstructionSequence::EndBlock(BasicBlock::RpoNumber rpo) {
int end = static_cast<int>(instructions_.size());
InstructionBlock* block = InstructionBlockAt(rpo);
+ if (block->code_start() == end) { // Empty block. Insert a nop.
+ AddInstruction(Instruction::New(zone(), kArchNop));
+ end = static_cast<int>(instructions_.size());
+ }
DCHECK(block->code_start() >= 0 && block->code_start() < end);
block->set_code_end(end);
}
int InstructionSequence::AddInstruction(Instruction* instr) {
- // TODO(titzer): the order of these gaps is a holdover from Lithium.
GapInstruction* gap = GapInstruction::New(zone());
- if (instr->IsControl()) instructions_.push_back(gap);
+ instructions_.push_back(gap);
int index = static_cast<int>(instructions_.size());
instructions_.push_back(instr);
- if (!instr->IsControl()) instructions_.push_back(gap);
if (instr->NeedsPointerMap()) {
DCHECK(instr->pointer_map() == NULL);
PointerMap* pointer_map = new (zone()) PointerMap(zone());
@@ -669,10 +682,10 @@ std::ostream& operator<<(std::ostream& os,
for (auto phi : block->phis()) {
PrintableInstructionOperand printable_op = {
- printable.register_configuration_, phi->output()};
+ printable.register_configuration_, &phi->output()};
os << " phi: " << printable_op << " =";
for (auto input : phi->inputs()) {
- printable_op.op_ = input;
+ printable_op.op_ = &input;
os << " " << printable_op;
}
os << "\n";
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index daa83f29bc..d04d0367f5 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -25,20 +25,22 @@ namespace compiler {
// A couple of reserved opcodes are used for internal use.
const InstructionCode kGapInstruction = -1;
-const InstructionCode kBlockStartInstruction = -2;
-const InstructionCode kSourcePositionInstruction = -3;
-
-#define INSTRUCTION_OPERAND_LIST(V) \
- V(Constant, CONSTANT, 0) \
- V(Immediate, IMMEDIATE, 0) \
- V(StackSlot, STACK_SLOT, 128) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
- V(Register, REGISTER, RegisterConfiguration::kMaxGeneralRegisters) \
- V(DoubleRegister, DOUBLE_REGISTER, RegisterConfiguration::kMaxDoubleRegisters)
-
-class InstructionOperand : public ZoneObject {
+const InstructionCode kSourcePositionInstruction = -2;
+
+#define INSTRUCTION_OPERAND_LIST(V) \
+ V(Constant, CONSTANT) \
+ V(Immediate, IMMEDIATE) \
+ V(StackSlot, STACK_SLOT) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
+ V(Register, REGISTER) \
+ V(DoubleRegister, DOUBLE_REGISTER)
+
+class InstructionOperand {
public:
+ static const int kInvalidVirtualRegister = -1;
+
enum Kind {
+ INVALID,
UNALLOCATED,
CONSTANT,
IMMEDIATE,
@@ -48,14 +50,27 @@ class InstructionOperand : public ZoneObject {
DOUBLE_REGISTER
};
- InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
+ InstructionOperand() : virtual_register_(kInvalidVirtualRegister) {
+ ConvertTo(INVALID, 0);
+ }
+
+ InstructionOperand(Kind kind, int index)
+ : virtual_register_(kInvalidVirtualRegister) {
+ DCHECK(kind != INVALID);
+ ConvertTo(kind, index);
+ }
+
+ static InstructionOperand* New(Zone* zone, Kind kind, int index) {
+ return New(zone, InstructionOperand(kind, index));
+ }
Kind kind() const { return KindField::decode(value_); }
int index() const { return static_cast<int>(value_) >> KindField::kSize; }
-#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
+#define INSTRUCTION_OPERAND_PREDICATE(name, type) \
bool Is##name() const { return kind() == type; }
INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
- INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+ INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
+ INSTRUCTION_OPERAND_PREDICATE(Invalid, INVALID)
#undef INSTRUCTION_OPERAND_PREDICATE
bool Equals(const InstructionOperand* other) const {
return value_ == other->value_;
@@ -66,19 +81,26 @@ class InstructionOperand : public ZoneObject {
value_ = KindField::encode(kind);
value_ |= bit_cast<unsigned>(index << KindField::kSize);
DCHECK(this->index() == index);
+ if (kind != UNALLOCATED) virtual_register_ = kInvalidVirtualRegister;
}
- // Calls SetUpCache()/TearDownCache() for each subclass.
- static void SetUpCaches();
- static void TearDownCaches();
-
protected:
- typedef BitField64<Kind, 0, 3> KindField;
+ template <typename SubKindOperand>
+ static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
+ void* buffer = zone->New(sizeof(op));
+ return new (buffer) SubKindOperand(op);
+ }
- uint64_t value_;
-};
+ InstructionOperand(Kind kind, int index, int virtual_register)
+ : virtual_register_(virtual_register) {
+ ConvertTo(kind, index);
+ }
+ typedef BitField<Kind, 0, 3> KindField;
-typedef ZoneVector<InstructionOperand*> InstructionOperandVector;
+ uint32_t value_;
+ // TODO(dcarney): this should really be unsigned.
+ int32_t virtual_register_;
+};
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
@@ -115,45 +137,42 @@ class UnallocatedOperand : public InstructionOperand {
USED_AT_END
};
- explicit UnallocatedOperand(ExtendedPolicy policy)
- : InstructionOperand(UNALLOCATED, 0) {
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
+ UnallocatedOperand(ExtendedPolicy policy, int virtual_register)
+ : InstructionOperand(UNALLOCATED, 0, virtual_register) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
}
- UnallocatedOperand(BasicPolicy policy, int index)
- : InstructionOperand(UNALLOCATED, 0) {
+ UnallocatedOperand(BasicPolicy policy, int index, int virtual_register)
+ : InstructionOperand(UNALLOCATED, 0, virtual_register) {
DCHECK(policy == FIXED_SLOT);
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
value_ |= BasicPolicyField::encode(policy);
- value_ |= static_cast<int64_t>(index) << FixedSlotIndexField::kShift;
+ value_ |= static_cast<int32_t>(index) << FixedSlotIndexField::kShift;
DCHECK(this->fixed_slot_index() == index);
}
- UnallocatedOperand(ExtendedPolicy policy, int index)
- : InstructionOperand(UNALLOCATED, 0) {
+ UnallocatedOperand(ExtendedPolicy policy, int index, int virtual_register)
+ : InstructionOperand(UNALLOCATED, 0, virtual_register) {
DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
value_ |= FixedRegisterField::encode(index);
}
- UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
- : InstructionOperand(UNALLOCATED, 0) {
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
+ UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime,
+ int virtual_register)
+ : InstructionOperand(UNALLOCATED, 0, virtual_register) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(lifetime);
}
+ UnallocatedOperand* Copy(Zone* zone) { return New(zone, *this); }
+
UnallocatedOperand* CopyUnconstrained(Zone* zone) {
- UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
- result->set_virtual_register(virtual_register());
- return result;
+ return New(zone, UnallocatedOperand(ANY, virtual_register()));
}
static const UnallocatedOperand* cast(const InstructionOperand* op) {
@@ -166,41 +185,43 @@ class UnallocatedOperand : public InstructionOperand {
return static_cast<UnallocatedOperand*>(op);
}
+ static UnallocatedOperand cast(const InstructionOperand& op) {
+ DCHECK(op.IsUnallocated());
+ return *static_cast<const UnallocatedOperand*>(&op);
+ }
+
// The encoding used for UnallocatedOperand operands depends on the policy
// that is
// stored within the operand. The FIXED_SLOT policy uses a compact encoding
// because it accommodates a larger pay-load.
//
// For FIXED_SLOT policy:
- // +------------------------------------------+
- // | slot_index | vreg | 0 | 001 |
- // +------------------------------------------+
+ // +-----------------------------+
+ // | slot_index | 0 | 001 |
+ // +-----------------------------+
//
// For all other (extended) policies:
- // +------------------------------------------+
- // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
- // +------------------------------------------+ P ... Policy
+ // +----------------------------------+
+ // | reg_index | L | PPP | 1 | 001 | L ... Lifetime
+ // +----------------------------------+ P ... Policy
//
// The slot index is a signed value which requires us to decode it manually
- // instead of using the BitField64 utility class.
+ // instead of using the BitField utility class.
// The superclass has a KindField.
STATIC_ASSERT(KindField::kSize == 3);
// BitFields for all unallocated operands.
- class BasicPolicyField : public BitField64<BasicPolicy, 3, 1> {};
- class VirtualRegisterField : public BitField64<unsigned, 4, 30> {};
+ class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
// BitFields specific to BasicPolicy::FIXED_SLOT.
- class FixedSlotIndexField : public BitField64<int, 34, 30> {};
+ class FixedSlotIndexField : public BitField<int, 4, 28> {};
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
- class ExtendedPolicyField : public BitField64<ExtendedPolicy, 34, 3> {};
- class LifetimeField : public BitField64<Lifetime, 37, 1> {};
- class FixedRegisterField : public BitField64<int, 38, 6> {};
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 4, 3> {};
+ class LifetimeField : public BitField<Lifetime, 7, 1> {};
+ class FixedRegisterField : public BitField<int, 8, 6> {};
- static const int kInvalidVirtualRegister = VirtualRegisterField::kMax;
- static const int kMaxVirtualRegisters = VirtualRegisterField::kMax;
static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
@@ -233,7 +254,10 @@ class UnallocatedOperand : public InstructionOperand {
}
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
- BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
+ BasicPolicy basic_policy() const {
+ DCHECK_EQ(UNALLOCATED, kind());
+ return BasicPolicyField::decode(value_);
+ }
// [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
ExtendedPolicy extended_policy() const {
@@ -244,7 +268,7 @@ class UnallocatedOperand : public InstructionOperand {
// [fixed_slot_index]: Only for FIXED_SLOT.
int fixed_slot_index() const {
DCHECK(HasFixedSlotPolicy());
- return static_cast<int>(bit_cast<int64_t>(value_) >>
+ return static_cast<int>(bit_cast<int32_t>(value_) >>
FixedSlotIndexField::kShift);
}
@@ -255,9 +279,15 @@ class UnallocatedOperand : public InstructionOperand {
}
// [virtual_register]: The virtual register ID for this operand.
- int virtual_register() const { return VirtualRegisterField::decode(value_); }
- void set_virtual_register(unsigned id) {
- value_ = VirtualRegisterField::update(value_, id);
+ int32_t virtual_register() const {
+ DCHECK_EQ(UNALLOCATED, kind());
+ return virtual_register_;
+ }
+
+ // TODO(dcarney): remove this.
+ void set_virtual_register(int32_t id) {
+ DCHECK_EQ(UNALLOCATED, kind());
+ virtual_register_ = id;
}
// [lifetime]: Only for non-FIXED_SLOT.
@@ -317,41 +347,33 @@ struct PrintableMoveOperands {
std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-class SubKindOperand FINAL : public InstructionOperand {
- public:
- static SubKindOperand* Create(int index, Zone* zone) {
- DCHECK(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new (zone) SubKindOperand(index);
- }
-
- static SubKindOperand* cast(InstructionOperand* op) {
- DCHECK(op->kind() == kOperandKind);
- return reinterpret_cast<SubKindOperand*>(op);
- }
-
- static const SubKindOperand* cast(const InstructionOperand* op) {
- DCHECK(op->kind() == kOperandKind);
- return reinterpret_cast<const SubKindOperand*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static SubKindOperand* cache;
-
- SubKindOperand() : InstructionOperand(kOperandKind, 0) {} // For the caches.
- explicit SubKindOperand(int index)
- : InstructionOperand(kOperandKind, index) {}
-};
-
-
-#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
- typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
-INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
-#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
+#define INSTRUCTION_SUBKIND_OPERAND_CLASS(SubKind, kOperandKind) \
+ class SubKind##Operand FINAL : public InstructionOperand { \
+ public: \
+ explicit SubKind##Operand(int index) \
+ : InstructionOperand(kOperandKind, index) {} \
+ \
+ static SubKind##Operand* New(int index, Zone* zone) { \
+ return InstructionOperand::New(zone, SubKind##Operand(index)); \
+ } \
+ \
+ static SubKind##Operand* cast(InstructionOperand* op) { \
+ DCHECK(op->kind() == kOperandKind); \
+ return reinterpret_cast<SubKind##Operand*>(op); \
+ } \
+ \
+ static const SubKind##Operand* cast(const InstructionOperand* op) { \
+ DCHECK(op->kind() == kOperandKind); \
+ return reinterpret_cast<const SubKind##Operand*>(op); \
+ } \
+ \
+ static SubKind##Operand cast(const InstructionOperand& op) { \
+ DCHECK(op.kind() == kOperandKind); \
+ return *static_cast<const SubKind##Operand*>(&op); \
+ } \
+ };
+INSTRUCTION_OPERAND_LIST(INSTRUCTION_SUBKIND_OPERAND_CLASS)
+#undef INSTRUCTION_SUBKIND_OPERAND_CLASS
class ParallelMove FINAL : public ZoneObject {
@@ -419,31 +441,40 @@ class PointerMap FINAL : public ZoneObject {
std::ostream& operator<<(std::ostream& os, const PointerMap& pm);
// TODO(titzer): s/PointerMap/ReferenceMap/
-class Instruction : public ZoneObject {
+class Instruction {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
- InstructionOperand* OutputAt(size_t i) const {
+ const InstructionOperand* OutputAt(size_t i) const {
+ DCHECK(i < OutputCount());
+ return &operands_[i];
+ }
+ InstructionOperand* OutputAt(size_t i) {
DCHECK(i < OutputCount());
- return operands_[i];
+ return &operands_[i];
}
bool HasOutput() const { return OutputCount() == 1; }
- InstructionOperand* Output() const { return OutputAt(0); }
+ const InstructionOperand* Output() const { return OutputAt(0); }
+ InstructionOperand* Output() { return OutputAt(0); }
size_t InputCount() const { return InputCountField::decode(bit_field_); }
- InstructionOperand* InputAt(size_t i) const {
+ const InstructionOperand* InputAt(size_t i) const {
DCHECK(i < InputCount());
- return operands_[OutputCount() + i];
+ return &operands_[OutputCount() + i];
}
- void SetInputAt(size_t i, InstructionOperand* operand) {
+ InstructionOperand* InputAt(size_t i) {
DCHECK(i < InputCount());
- operands_[OutputCount() + i] = operand;
+ return &operands_[OutputCount() + i];
}
size_t TempCount() const { return TempCountField::decode(bit_field_); }
- InstructionOperand* TempAt(size_t i) const {
+ const InstructionOperand* TempAt(size_t i) const {
DCHECK(i < TempCount());
- return operands_[OutputCount() + InputCount() + i];
+ return &operands_[OutputCount() + InputCount() + i];
+ }
+ InstructionOperand* TempAt(size_t i) {
+ DCHECK(i < TempCount());
+ return &operands_[OutputCount() + InputCount() + i];
}
InstructionCode opcode() const { return opcode_; }
@@ -462,18 +493,18 @@ class Instruction : public ZoneObject {
}
static Instruction* New(Zone* zone, InstructionCode opcode,
- size_t output_count, InstructionOperand** outputs,
- size_t input_count, InstructionOperand** inputs,
- size_t temp_count, InstructionOperand** temps) {
+ size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs,
+ size_t temp_count, InstructionOperand* temps) {
DCHECK(opcode >= 0);
DCHECK(output_count == 0 || outputs != NULL);
DCHECK(input_count == 0 || inputs != NULL);
DCHECK(temp_count == 0 || temps != NULL);
- InstructionOperand* none = NULL;
- USE(none);
- int size = static_cast<int>(RoundUp(sizeof(Instruction), kPointerSize) +
- (output_count + input_count + temp_count - 1) *
- sizeof(none));
+ size_t total_extra_ops = output_count + input_count + temp_count;
+ if (total_extra_ops != 0) total_extra_ops--;
+ int size = static_cast<int>(
+ RoundUp(sizeof(Instruction), sizeof(InstructionOperand)) +
+ total_extra_ops * sizeof(InstructionOperand));
return new (zone->New(size)) Instruction(
opcode, output_count, outputs, input_count, inputs, temp_count, temps);
}
@@ -493,10 +524,7 @@ class Instruction : public ZoneObject {
bool NeedsPointerMap() const { return IsCall(); }
bool HasPointerMap() const { return pointer_map_ != NULL; }
- bool IsGapMoves() const {
- return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
- }
- bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
+ bool IsGapMoves() const { return opcode() == kGapInstruction; }
bool IsSourcePosition() const {
return opcode() == kSourcePositionInstruction;
}
@@ -508,16 +536,10 @@ class Instruction : public ZoneObject {
void set_pointer_map(PointerMap* map) {
DCHECK(NeedsPointerMap());
- DCHECK_EQ(NULL, pointer_map_);
+ DCHECK(!pointer_map_);
pointer_map_ = map;
}
- // Placement new operator so that we can smash instructions into
- // zone-allocated memory.
- void* operator new(size_t, void* location) { return location; }
-
- void operator delete(void* pointer, void* location) { UNREACHABLE(); }
-
void OverwriteWithNop() {
opcode_ = ArchOpcodeField::encode(kArchNop);
bit_field_ = 0;
@@ -530,35 +552,12 @@ class Instruction : public ZoneObject {
}
protected:
- explicit Instruction(InstructionCode opcode)
- : opcode_(opcode),
- bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
- TempCountField::encode(0) | IsCallField::encode(false) |
- IsControlField::encode(false)),
- pointer_map_(NULL) {}
-
+ explicit Instruction(InstructionCode opcode);
Instruction(InstructionCode opcode, size_t output_count,
- InstructionOperand** outputs, size_t input_count,
- InstructionOperand** inputs, size_t temp_count,
- InstructionOperand** temps)
- : opcode_(opcode),
- bit_field_(OutputCountField::encode(output_count) |
- InputCountField::encode(input_count) |
- TempCountField::encode(temp_count) |
- IsCallField::encode(false) | IsControlField::encode(false)),
- pointer_map_(NULL) {
- for (size_t i = 0; i < output_count; ++i) {
- operands_[i] = outputs[i];
- }
- for (size_t i = 0; i < input_count; ++i) {
- operands_[output_count + i] = inputs[i];
- }
- for (size_t i = 0; i < temp_count; ++i) {
- operands_[output_count + input_count + i] = temps[i];
- }
- }
+ InstructionOperand* outputs, size_t input_count,
+ InstructionOperand* inputs, size_t temp_count,
+ InstructionOperand* temps);
- protected:
typedef BitField<size_t, 0, 8> OutputCountField;
typedef BitField<size_t, 8, 16> InputCountField;
typedef BitField<size_t, 24, 6> TempCountField;
@@ -568,7 +567,10 @@ class Instruction : public ZoneObject {
InstructionCode opcode_;
uint32_t bit_field_;
PointerMap* pointer_map_;
- InstructionOperand* operands_[1];
+ InstructionOperand operands_[1];
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Instruction);
};
@@ -642,30 +644,6 @@ class GapInstruction : public Instruction {
};
-// This special kind of gap move instruction represents the beginning of a
-// block of code.
-class BlockStartInstruction FINAL : public GapInstruction {
- public:
- static BlockStartInstruction* New(Zone* zone) {
- void* buffer = zone->New(sizeof(BlockStartInstruction));
- return new (buffer) BlockStartInstruction();
- }
-
- static BlockStartInstruction* cast(Instruction* instr) {
- DCHECK(instr->IsBlockStart());
- return static_cast<BlockStartInstruction*>(instr);
- }
-
- static const BlockStartInstruction* cast(const Instruction* instr) {
- DCHECK(instr->IsBlockStart());
- return static_cast<const BlockStartInstruction*>(instr);
- }
-
- private:
- BlockStartInstruction() : GapInstruction(kBlockStartInstruction) {}
-};
-
-
class SourcePositionInstruction FINAL : public Instruction {
public:
static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
@@ -810,33 +788,17 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant);
class PhiInstruction FINAL : public ZoneObject {
public:
- typedef ZoneVector<InstructionOperand*> Inputs;
+ typedef ZoneVector<InstructionOperand> Inputs;
- PhiInstruction(Zone* zone, int virtual_register, size_t reserved_input_count)
- : virtual_register_(virtual_register),
- operands_(zone),
- output_(nullptr),
- inputs_(zone) {
- UnallocatedOperand* output =
- new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
- output->set_virtual_register(virtual_register);
- output_ = output;
- inputs_.reserve(reserved_input_count);
- operands_.reserve(reserved_input_count);
- }
+ PhiInstruction(Zone* zone, int virtual_register, size_t input_count);
+
+ void SetInput(size_t offset, int virtual_register);
int virtual_register() const { return virtual_register_; }
const IntVector& operands() const { return operands_; }
- void Extend(Zone* zone, int virtual_register) {
- UnallocatedOperand* input =
- new (zone) UnallocatedOperand(UnallocatedOperand::ANY);
- input->set_virtual_register(virtual_register);
- operands_.push_back(virtual_register);
- inputs_.push_back(input);
- }
-
- InstructionOperand* output() const { return output_; }
+ const InstructionOperand& output() const { return output_; }
+ InstructionOperand& output() { return output_; }
const Inputs& inputs() const { return inputs_; }
Inputs& inputs() { return inputs_; }
@@ -844,8 +806,8 @@ class PhiInstruction FINAL : public ZoneObject {
// TODO(dcarney): some of these fields are only for verification, move them to
// verifier.
const int virtual_register_;
+ InstructionOperand output_;
IntVector operands_;
- InstructionOperand* output_;
Inputs inputs_;
};
@@ -945,9 +907,10 @@ class InstructionSequence FINAL : public ZoneObject {
// Puts the deferred blocks last.
static void ComputeAssemblyOrder(InstructionBlocks* blocks);
- InstructionSequence(Zone* zone, InstructionBlocks* instruction_blocks);
+ InstructionSequence(Isolate* isolate, Zone* zone,
+ InstructionBlocks* instruction_blocks);
- int NextVirtualRegister() { return next_virtual_register_++; }
+ int NextVirtualRegister();
int VirtualRegisterCount() const { return next_virtual_register_; }
const InstructionBlocks& instruction_blocks() const {
@@ -982,7 +945,7 @@ class InstructionSequence FINAL : public ZoneObject {
void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
- BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo) const;
+ GapInstruction* GetBlockStart(BasicBlock::RpoNumber rpo) const;
typedef InstructionDeque::const_iterator const_iterator;
const_iterator begin() const { return instructions_.begin(); }
@@ -999,7 +962,7 @@ class InstructionSequence FINAL : public ZoneObject {
return instructions_[index];
}
- Isolate* isolate() const { return zone()->isolate(); }
+ Isolate* isolate() const { return isolate_; }
const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
Zone* zone() const { return zone_; }
@@ -1064,6 +1027,7 @@ class InstructionSequence FINAL : public ZoneObject {
typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+ Isolate* isolate_;
Zone* const zone_;
InstructionBlocks* const instruction_blocks_;
IntVector block_starts_;
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index ff9f7b4523..a89f4a3255 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -3,11 +3,10 @@
// found in the LICENSE file.
#include "src/compiler/diamond.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/types.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index a700b47ceb..1bf19ac7d6 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -4,23 +4,16 @@
#include "src/compiler/js-context-specialization.h"
-#include "src/compiler.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
Reduction JSContextSpecializer::Reduce(Node* node) {
- if (node == context_) {
- Node* constant = jsgraph_->Constant(info_->context());
- NodeProperties::ReplaceWithValue(node, constant);
- return Replace(constant);
- }
if (node->opcode() == IrOpcode::kJSLoadContext) {
return ReduceJSLoadContext(node);
}
@@ -57,12 +50,13 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
const Operator* op = jsgraph_->javascript()->LoadContext(
0, access.index(), access.immutable());
node->set_op(op);
- Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
+ Handle<Object> context_handle =
+ Handle<Object>(context, jsgraph_->isolate());
node->ReplaceInput(0, jsgraph_->Constant(context_handle));
return Changed(node);
}
Handle<Object> value = Handle<Object>(
- context->get(static_cast<int>(access.index())), info_->isolate());
+ context->get(static_cast<int>(access.index())), jsgraph_->isolate());
// Even though the context slot is immutable, the context might have escaped
// before the function to which it belongs has initialized the slot.
@@ -105,7 +99,8 @@ Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
const Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
node->set_op(op);
- Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
+ Handle<Object> new_context_handle =
+ Handle<Object>(context, jsgraph_->isolate());
node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
return Changed(node);
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 298d3a39f8..f58eaa6848 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -8,7 +8,6 @@
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/contexts.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -18,8 +17,7 @@ namespace compiler {
// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
class JSContextSpecializer : public Reducer {
public:
- JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
- : info_(info), jsgraph_(jsgraph), context_(context) {}
+ explicit JSContextSpecializer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
Reduction Reduce(Node* node) OVERRIDE;
@@ -28,9 +26,7 @@ class JSContextSpecializer : public Reducer {
Reduction ReduceJSStoreContext(Node* node);
private:
- CompilationInfo* info_;
JSGraph* jsgraph_;
- Node* context_;
};
}
}
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 4886442314..4720c582ec 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -5,32 +5,19 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
#include "src/unique.h"
namespace v8 {
namespace internal {
namespace compiler {
-JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph)
- : info_(info),
- jsgraph_(jsgraph),
- linkage_(new (jsgraph->zone()) Linkage(jsgraph->zone(), info)) {}
-
-
-void JSGenericLowering::PatchOperator(Node* node, const Operator* op) {
- node->set_op(op);
-}
-
-
-void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
- node->InsertInput(zone(), index, input);
-}
+JSGenericLowering::JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph)
+ : is_typing_enabled_(is_typing_enabled), jsgraph_(jsgraph) {}
Reduction JSGenericLowering::Reduce(Node* node) {
@@ -45,10 +32,13 @@ Reduction JSGenericLowering::Reduce(Node* node) {
// TODO(mstarzinger): If typing is enabled then simplified lowering will
// have inserted the correct ChangeBoolToBit, otherwise we need to perform
// poor-man's representation inference here and insert manual change.
- if (!info()->is_typing_enabled()) {
- Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
- jsgraph()->TrueConstant());
- node->ReplaceInput(0, test);
+ if (!is_typing_enabled_) {
+ Node* condition = node->InputAt(0);
+ if (condition->opcode() != IrOpcode::kAlways) {
+ Node* test = graph()->NewNode(machine()->WordEqual(), condition,
+ jsgraph()->TrueConstant());
+ node->ReplaceInput(0, test);
+ }
break;
}
// Fall-through.
@@ -101,7 +91,6 @@ REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE)
REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
-REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
@@ -111,7 +100,6 @@ REPLACE_RUNTIME_CALL(JSCreateScriptContext, Runtime::kAbort)
#define REPLACE_UNIMPLEMENTED(op) \
void JSGenericLowering::Lower##op(Node* node) { UNIMPLEMENTED(); }
-REPLACE_UNIMPLEMENTED(JSToName)
REPLACE_UNIMPLEMENTED(JSYield)
REPLACE_UNIMPLEMENTED(JSDebugger)
#undef REPLACE_UNIMPLEMENTED
@@ -128,10 +116,11 @@ static CallDescriptor::Flags FlagsForNode(Node* node) {
void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
Callable callable = CodeFactory::CompareIC(isolate(), token);
- bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
- CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(
- callable.descriptor(), 0,
+ CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0,
CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
+
+ // Create a new call node asking a CompareIC for help.
NodeVector inputs(zone());
inputs.reserve(node->InputCount() + 1);
inputs.push_back(jsgraph()->HeapConstant(callable.code()));
@@ -141,11 +130,12 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
if (node->op()->HasProperty(Operator::kPure)) {
// A pure (strict) comparison doesn't have an effect, control or frame
// state. But for the graph, we need to add control and effect inputs.
- DCHECK(!has_frame_state);
+ DCHECK(!OperatorProperties::HasFrameStateInput(node->op()));
inputs.push_back(graph()->start());
inputs.push_back(graph()->start());
} else {
- DCHECK(has_frame_state == FLAG_turbo_deoptimization);
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()) ==
+ FLAG_turbo_deoptimization);
if (FLAG_turbo_deoptimization) {
inputs.push_back(NodeProperties::GetFrameStateInput(node));
}
@@ -155,27 +145,65 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
Node* compare =
graph()->NewNode(common()->Call(desc_compare),
static_cast<int>(inputs.size()), &inputs.front());
-
- node->ReplaceInput(0, compare);
- node->ReplaceInput(1, jsgraph()->SmiConstant(token));
-
- if (has_frame_state) {
- // Remove the frame state from inputs.
- node->RemoveInput(NodeProperties::FirstFrameStateIndex(node));
+ NodeProperties::SetBounds(
+ compare, Bounds(Type::None(zone()), Type::UntaggedSigned(zone())));
+
+ // Decide how the return value from the above CompareIC can be converted into
+ // a JavaScript boolean oddball depending on the given token.
+ Node* false_value = jsgraph()->FalseConstant();
+ Node* true_value = jsgraph()->TrueConstant();
+ const Operator* op = nullptr;
+ switch (token) {
+ case Token::EQ: // a == 0
+ case Token::EQ_STRICT:
+ op = machine()->WordEqual();
+ break;
+ case Token::NE: // a != 0 becomes !(a == 0)
+ case Token::NE_STRICT:
+ op = machine()->WordEqual();
+ std::swap(true_value, false_value);
+ break;
+ case Token::LT: // a < 0
+ op = machine()->IntLessThan();
+ break;
+ case Token::GT: // a > 0 becomes !(a <= 0)
+ op = machine()->IntLessThanOrEqual();
+ std::swap(true_value, false_value);
+ break;
+ case Token::LTE: // a <= 0
+ op = machine()->IntLessThanOrEqual();
+ break;
+ case Token::GTE: // a >= 0 becomes !(a < 0)
+ op = machine()->IntLessThan();
+ std::swap(true_value, false_value);
+ break;
+ default:
+ UNREACHABLE();
}
-
- ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+ Node* booleanize = graph()->NewNode(op, compare, jsgraph()->ZeroConstant());
+
+ // Finally patch the original node to select a boolean.
+ NodeProperties::ReplaceWithValue(node, node, compare);
+ // TODO(mstarzinger): Just a work-around because SelectLowering might
+ // otherwise introduce a Phi without any uses, making Scheduler unhappy.
+ if (node->UseCount() == 0) return;
+ node->TrimInputCount(3);
+ node->ReplaceInput(0, booleanize);
+ node->ReplaceInput(1, true_value);
+ node->ReplaceInput(2, false_value);
+ node->set_op(common()->Select(kMachAnyTagged));
}
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags) {
Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- callable.descriptor(), 0, flags | FlagsForNode(node), properties);
+ CallDescriptor* desc =
+ Linkage::GetStubCallDescriptor(isolate(), zone(), callable.descriptor(),
+ 0, flags | FlagsForNode(node), properties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- PatchInsertInput(node, 0, stub_code);
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, stub_code);
+ node->set_op(common()->Call(desc));
}
@@ -185,17 +213,27 @@ void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
Operator::Properties properties = node->op()->properties();
Callable callable =
CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- callable.descriptor(), nargs, FlagsForNode(node), properties);
- // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
- // of code across native contexts. Fix this by loading from given context.
- Handle<JSFunction> function(
- JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
+ CallDescriptor* desc =
+ Linkage::GetStubCallDescriptor(isolate(), zone(), callable.descriptor(),
+ nargs, FlagsForNode(node), properties);
+ Node* global_object = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), NodeProperties::GetContextInput(node),
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
+ NodeProperties::GetEffectInput(node), graph()->start());
+ Node* builtins_object = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), global_object,
+ jsgraph()->IntPtrConstant(GlobalObject::kBuiltinsOffset - kHeapObjectTag),
+ NodeProperties::GetEffectInput(node), graph()->start());
+ Node* function = graph()->NewNode(
+ machine()->Load(kMachAnyTagged), builtins_object,
+ jsgraph()->IntPtrConstant(JSBuiltinsObject::OffsetOfFunctionWithId(id) -
+ kHeapObjectTag),
+ NodeProperties::GetEffectInput(node), graph()->start());
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* function_node = jsgraph()->HeapConstant(function);
- PatchInsertInput(node, 0, stub_code);
- PatchInsertInput(node, 1, function_node);
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 1, function);
+ node->set_op(common()->Call(desc));
}
@@ -206,13 +244,13 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
CallDescriptor* desc =
- linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
+ Linkage::GetRuntimeCallDescriptor(zone(), f, nargs, properties);
Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
Node* arity = jsgraph()->Int32Constant(nargs);
- PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant(fun->result_size));
- PatchInsertInput(node, nargs + 1, ref);
- PatchInsertInput(node, nargs + 2, arity);
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
+ node->InsertInput(zone(), nargs + 1, ref);
+ node->InsertInput(zone(), nargs + 2, arity);
+ node->set_op(common()->Call(desc));
}
@@ -241,6 +279,11 @@ void JSGenericLowering::LowerJSToString(Node* node) {
}
+void JSGenericLowering::LowerJSToName(Node* node) {
+ ReplaceWithBuiltinCall(node, Builtins::TO_NAME, 1);
+}
+
+
void JSGenericLowering::LowerJSToObject(Node* node) {
ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
}
@@ -250,8 +293,9 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
if (FLAG_vector_ics) {
- PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
- PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3,
+ jsgraph()->HeapConstant(p.feedback().vector()));
}
ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
@@ -261,34 +305,35 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
Callable callable =
CodeFactory::LoadICInOptimizedCode(isolate(), p.contextual_mode());
- PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
if (FLAG_vector_ics) {
- PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
- PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3,
+ jsgraph()->HeapConstant(p.feedback().vector()));
}
ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
- StrictMode strict_mode = OpParameter<StrictMode>(node);
- Callable callable = CodeFactory::KeyedStoreIC(isolate(), strict_mode);
+ LanguageMode language_mode = OpParameter<LanguageMode>(node);
+ Callable callable = CodeFactory::KeyedStoreIC(isolate(), language_mode);
ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
- Callable callable = CodeFactory::StoreIC(isolate(), p.strict_mode());
- PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name()));
+ Callable callable = CodeFactory::StoreIC(isolate(), p.language_mode());
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
- StrictMode strict_mode = OpParameter<StrictMode>(node);
- PatchInsertInput(node, 2, jsgraph()->SmiConstant(strict_mode));
+ LanguageMode language_mode = OpParameter<LanguageMode>(node);
ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
+ node->InsertInput(zone(), 4, jsgraph()->SmiConstant(language_mode));
}
@@ -303,11 +348,11 @@ void JSGenericLowering::LowerJSInstanceOf(Node* node) {
InstanceofStub::kArgsInRegisters);
InstanceofStub stub(isolate(), flags);
CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc =
- linkage()->GetStubCallDescriptor(d, 0, FlagsForNode(node));
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(isolate(), zone(), d, 0,
+ FlagsForNode(node));
Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- PatchInsertInput(node, 0, stub_code);
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, stub_code);
+ node->set_op(common()->Call(desc));
}
@@ -325,7 +370,7 @@ void JSGenericLowering::LowerJSLoadContext(Node* node) {
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
node->AppendInput(zone(), graph()->start());
- PatchOperator(node, machine()->Load(kMachAnyTagged));
+ node->set_op(machine()->Load(kMachAnyTagged));
}
@@ -343,8 +388,15 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
- PatchOperator(node, machine()->Store(StoreRepresentation(kMachAnyTagged,
- kFullWriteBarrier)));
+ node->set_op(
+ machine()->Store(StoreRepresentation(kMachAnyTagged, kFullWriteBarrier)));
+}
+
+
+void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
+ Unique<String> name = OpParameter<Unique<String>>(node);
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(name));
+ ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
}
@@ -352,15 +404,15 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
int arity = OpParameter<int>(node);
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc =
- linkage()->GetStubCallDescriptor(d, arity, FlagsForNode(node));
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), d, arity, FlagsForNode(node));
Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
Node* construct = NodeProperties::GetValueInput(node, 0);
- PatchInsertInput(node, 0, stub_code);
- PatchInsertInput(node, 1, jsgraph()->Int32Constant(arity - 1));
- PatchInsertInput(node, 2, construct);
- PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 1, jsgraph()->Int32Constant(arity - 1));
+ node->InsertInput(zone(), 2, construct);
+ node->InsertInput(zone(), 3, jsgraph()->UndefinedConstant());
+ node->set_op(common()->Call(desc));
}
@@ -375,7 +427,9 @@ bool JSGenericLowering::TryLowerDirectJSCall(Node* node) {
Handle<Object> func = function_const.Value().handle();
if (!func->IsJSFunction()) return false; // not a function.
Handle<JSFunction> function = Handle<JSFunction>::cast(func);
- if (arg_count != function->shared()->formal_parameter_count()) return false;
+ if (arg_count != function->shared()->internal_formal_parameter_count()) {
+ return false;
+ }
// Check the receiver doesn't need to be wrapped.
Node* receiver = node->InputAt(1);
@@ -394,9 +448,9 @@ bool JSGenericLowering::TryLowerDirectJSCall(Node* node) {
context = jsgraph()->HeapConstant(Handle<Context>(function->context()));
}
node->ReplaceInput(index, context);
- CallDescriptor* desc = linkage()->GetJSCallDescriptor(
- 1 + arg_count, jsgraph()->zone(), FlagsForNode(node));
- PatchOperator(node, common()->Call(desc));
+ CallDescriptor* desc = Linkage::GetJSCallDescriptor(
+ zone(), false, 1 + arg_count, FlagsForNode(node));
+ node->set_op(common()->Call(desc));
return true;
}
@@ -410,11 +464,12 @@ void JSGenericLowering::LowerJSCallFunction(Node* node) {
int arg_count = static_cast<int>(p.arity() - 2);
CallFunctionStub stub(isolate(), arg_count, p.flags());
CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- d, static_cast<int>(p.arity() - 1), FlagsForNode(node));
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), d, static_cast<int>(p.arity() - 1),
+ FlagsForNode(node));
Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- PatchInsertInput(node, 0, stub_code);
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, stub_code);
+ node->set_op(common()->Call(desc));
}
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index f6263385ed..10057eb9e1 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -26,7 +26,7 @@ class Linkage;
// Lowers JS-level operators to runtime and IC calls in the "generic" case.
class JSGenericLowering FINAL : public Reducer {
public:
- JSGenericLowering(CompilationInfo* info, JSGraph* graph);
+ JSGenericLowering(bool is_typing_enabled, JSGraph* graph);
~JSGenericLowering() FINAL {}
Reduction Reduce(Node* node) FINAL;
@@ -37,10 +37,6 @@ class JSGenericLowering FINAL : public Reducer {
JS_OP_LIST(DECLARE_LOWER)
#undef DECLARE_LOWER
- // Helpers to patch existing nodes in the graph.
- void PatchOperator(Node* node, const Operator* new_op);
- void PatchInsertInput(Node* node, int index, Node* input);
-
// Helpers to replace existing nodes with a generic call.
void ReplaceWithCompareIC(Node* node, Token::Value token);
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
@@ -51,18 +47,15 @@ class JSGenericLowering FINAL : public Reducer {
bool TryLowerDirectJSCall(Node* node);
Zone* zone() const { return graph()->zone(); }
- Isolate* isolate() const { return zone()->isolate(); }
+ Isolate* isolate() const { return jsgraph()->isolate(); }
JSGraph* jsgraph() const { return jsgraph_; }
Graph* graph() const { return jsgraph()->graph(); }
- Linkage* linkage() const { return linkage_; }
- CompilationInfo* info() const { return info_; }
CommonOperatorBuilder* common() const { return jsgraph()->common(); }
MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
private:
- CompilationInfo* info_;
+ bool is_typing_enabled_;
JSGraph* jsgraph_;
- Linkage* linkage_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 7759ba1441..649b0d68d1 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -4,7 +4,7 @@
#include "src/code-stubs.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/typer.h"
namespace v8 {
@@ -85,7 +85,7 @@ Node* JSGraph::OneConstant() {
Node* JSGraph::NaNConstant() {
if (!nan_constant_.is_set()) {
- nan_constant_.set(NumberConstant(base::OS::nan_value()));
+ nan_constant_.set(NumberConstant(std::numeric_limits<double>::quiet_NaN()));
}
return nan_constant_.get();
}
@@ -200,6 +200,28 @@ Node* JSGraph::ExternalConstant(ExternalReference reference) {
}
+Node* JSGraph::EmptyFrameState() {
+ if (!empty_frame_state_.is_set()) {
+ Node* values = graph()->NewNode(common()->StateValues(0));
+ Node* state_node = graph()->NewNode(
+ common()->FrameState(JS_FRAME, BailoutId(0),
+ OutputFrameStateCombine::Ignore()),
+ values, values, values, NoContextConstant(), UndefinedConstant());
+ empty_frame_state_.set(state_node);
+ }
+ return empty_frame_state_.get();
+}
+
+
+Node* JSGraph::DeadControl() {
+ if (!dead_control_.is_set()) {
+ Node* dead_node = graph()->NewNode(common()->Dead());
+ dead_control_.set(dead_node);
+ }
+ return dead_control_.get();
+}
+
+
void JSGraph::GetCachedNodes(NodeVector* nodes) {
cache_.GetCachedNodes(nodes);
SetOncePointer<Node>* ptrs[] = {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 040a745e3c..fe9b9f7b7c 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -23,9 +23,10 @@ class Typer;
// constants, and various helper methods.
class JSGraph : public ZoneObject {
public:
- JSGraph(Graph* graph, CommonOperatorBuilder* common,
+ JSGraph(Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
JSOperatorBuilder* javascript, MachineOperatorBuilder* machine)
- : graph_(graph),
+ : isolate_(isolate),
+ graph_(graph),
common_(common),
javascript_(javascript),
machine_(machine),
@@ -109,17 +110,25 @@ class JSGraph : public ZoneObject {
// stubs and runtime functions that do not require a context.
Node* NoContextConstant() { return ZeroConstant(); }
- JSOperatorBuilder* javascript() { return javascript_; }
- CommonOperatorBuilder* common() { return common_; }
- MachineOperatorBuilder* machine() { return machine_; }
- Graph* graph() { return graph_; }
- Zone* zone() { return graph()->zone(); }
- Isolate* isolate() { return zone()->isolate(); }
- Factory* factory() { return isolate()->factory(); }
+ // Creates an empty frame states for cases where we know that a function
+ // cannot deopt.
+ Node* EmptyFrameState();
+
+ // Create a control node that serves as control dependency for dead nodes.
+ Node* DeadControl();
+
+ JSOperatorBuilder* javascript() const { return javascript_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return graph()->zone(); }
+ Isolate* isolate() const { return isolate_; }
+ Factory* factory() const { return isolate()->factory(); }
void GetCachedNodes(NodeVector* nodes);
private:
+ Isolate* isolate_;
Graph* graph_;
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
@@ -135,6 +144,8 @@ class JSGraph : public ZoneObject {
SetOncePointer<Node> zero_constant_;
SetOncePointer<Node> one_constant_;
SetOncePointer<Node> nan_constant_;
+ SetOncePointer<Node> empty_frame_state_;
+ SetOncePointer<Node> dead_control_;
CommonNodeCache cache_;
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index d143382dfd..91d0823dae 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -10,11 +10,9 @@
#include "src/compiler/graph-inl.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-inlining.h"
-#include "src/compiler/js-intrinsic-builder.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/typer.h"
#include "src/full-codegen.h"
@@ -27,33 +25,57 @@ namespace v8 {
namespace internal {
namespace compiler {
-class InlinerVisitor : public NullNodeVisitor {
+
+// Provides convenience accessors for calls to JS functions.
+class JSCallFunctionAccessor {
public:
- explicit InlinerVisitor(JSInliner* inliner) : inliner_(inliner) {}
+ explicit JSCallFunctionAccessor(Node* call) : call_(call) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ }
- void Post(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSCallFunction:
- inliner_->TryInlineJSCall(node);
- break;
- case IrOpcode::kJSCallRuntime:
- if (FLAG_turbo_inlining_intrinsics) {
- inliner_->TryInlineRuntimeCall(node);
- }
- break;
- default:
- break;
- }
+ Node* jsfunction() { return call_->InputAt(0); }
+
+ Node* receiver() { return call_->InputAt(1); }
+
+ Node* formal_argument(size_t index) {
+ DCHECK(index < formal_arguments());
+ return call_->InputAt(static_cast<int>(2 + index));
}
+ size_t formal_arguments() {
+ // {value_inputs} includes jsfunction and receiver.
+ size_t value_inputs = call_->op()->ValueInputCount();
+ DCHECK_GE(call_->InputCount(), 2);
+ return value_inputs - 2;
+ }
+
+ Node* frame_state() { return NodeProperties::GetFrameStateInput(call_); }
+
private:
- JSInliner* inliner_;
+ Node* call_;
};
-void JSInliner::Inline() {
- InlinerVisitor visitor(this);
- jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+Reduction JSInliner::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
+
+ JSCallFunctionAccessor call(node);
+ HeapObjectMatcher<JSFunction> match(call.jsfunction());
+ if (!match.HasValue()) return NoChange();
+
+ Handle<JSFunction> jsfunction = match.Value().handle();
+
+ if (jsfunction->shared()->native()) {
+ if (FLAG_trace_turbo_inlining) {
+ SmartArrayPointer<char> name =
+ jsfunction->shared()->DebugName()->ToCString();
+ PrintF("Not Inlining %s into %s because inlinee is native\n", name.get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ }
+ return NoChange();
+ }
+
+ return TryInlineJSCall(node, jsfunction);
}
@@ -90,13 +112,13 @@ class Inlinee {
// Counts only formal parameters.
size_t formal_parameters() {
- DCHECK_GE(total_parameters(), 3);
+ DCHECK_GE(total_parameters(), 3u);
return total_parameters() - 3;
}
// Inline this graph at {call}, use {jsgraph} and its zone to create
// any new nodes.
- void InlineAtCall(JSGraph* jsgraph, Node* call);
+ Reduction InlineAtCall(JSGraph* jsgraph, Node* call);
// Ensure that only a single return reaches the end node.
static void UnifyReturn(JSGraph* jsgraph);
@@ -182,7 +204,7 @@ class CopyVisitor : public NullNodeVisitor {
if (copy == NULL) {
copy = GetSentinel(original);
}
- DCHECK_NE(NULL, copy);
+ DCHECK(copy);
return copy;
}
@@ -199,7 +221,7 @@ class CopyVisitor : public NullNodeVisitor {
Node* sentinel = sentinels_[id];
if (sentinel == NULL) continue;
Node* copy = copies_[id];
- DCHECK_NE(NULL, copy);
+ DCHECK(copy);
sentinel->ReplaceUses(copy);
}
}
@@ -220,7 +242,7 @@ class CopyVisitor : public NullNodeVisitor {
};
-void Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
+Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
// The scheduler is smart enough to place our code; we just ensure {control}
// becomes the control input of the start of the inlinee.
Node* control = NodeProperties::GetControlInput(call);
@@ -274,42 +296,10 @@ void Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
}
NodeProperties::ReplaceWithValue(call, value_output(), effect_output());
- call->RemoveAllInputs();
- DCHECK_EQ(0, call->UseCount());
+ return Reducer::Replace(value_output());
}
-// TODO(turbofan) Provide such accessors for every node, possibly even
-// generate them.
-class JSCallFunctionAccessor {
- public:
- explicit JSCallFunctionAccessor(Node* call) : call_(call) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
- }
-
- Node* jsfunction() { return call_->InputAt(0); }
-
- Node* receiver() { return call_->InputAt(1); }
-
- Node* formal_argument(size_t index) {
- DCHECK(index < formal_arguments());
- return call_->InputAt(static_cast<int>(2 + index));
- }
-
- size_t formal_arguments() {
- // {value_inputs} includes jsfunction and receiver.
- size_t value_inputs = call_->op()->ValueInputCount();
- DCHECK_GE(call_->InputCount(), 2);
- return value_inputs - 2;
- }
-
- Node* frame_state() { return NodeProperties::GetFrameStateInput(call_); }
-
- private:
- Node* call_;
-};
-
-
void JSInliner::AddClosureToFrameState(Node* frame_state,
Handle<JSFunction> jsfunction) {
FrameStateCallInfo call_info = OpParameter<FrameStateCallInfo>(frame_state);
@@ -343,32 +333,15 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
}
-void JSInliner::TryInlineJSCall(Node* call_node) {
+Reduction JSInliner::TryInlineJSCall(Node* call_node,
+ Handle<JSFunction> function) {
JSCallFunctionAccessor call(call_node);
-
- HeapObjectMatcher<JSFunction> match(call.jsfunction());
- if (!match.HasValue()) {
- return;
- }
-
- Handle<JSFunction> function = match.Value().handle();
-
- if (function->shared()->native()) {
- if (FLAG_trace_turbo_inlining) {
- SmartArrayPointer<char> name =
- function->shared()->DebugName()->ToCString();
- PrintF("Not Inlining %s into %s because inlinee is native\n", name.get(),
- info_->shared_info()->DebugName()->ToCString().get());
- }
- return;
- }
-
CompilationInfoWithZone info(function);
- // TODO(wingo): ParseAndAnalyze can fail due to stack overflow.
- CHECK(Compiler::ParseAndAnalyze(&info));
- CHECK(Compiler::EnsureDeoptimizationSupport(&info));
- if (info.scope()->arguments() != NULL && info.strict_mode() != STRICT) {
+ if (!Compiler::ParseAndAnalyze(&info)) return NoChange();
+ if (!Compiler::EnsureDeoptimizationSupport(&info)) return NoChange();
+
+ if (info.scope()->arguments() != NULL && is_sloppy(info.language_mode())) {
// For now do not inline functions that use their arguments array.
SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
if (FLAG_trace_turbo_inlining) {
@@ -377,7 +350,7 @@ void JSInliner::TryInlineJSCall(Node* call_node) {
"array\n",
name.get(), info_->shared_info()->DebugName()->ToCString().get());
}
- return;
+ return NoChange();
}
if (FLAG_trace_turbo_inlining) {
@@ -387,11 +360,11 @@ void JSInliner::TryInlineJSCall(Node* call_node) {
}
Graph graph(info.zone());
- JSGraph jsgraph(&graph, jsgraph_->common(), jsgraph_->javascript(),
- jsgraph_->machine());
+ JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
+ jsgraph_->javascript(), jsgraph_->machine());
AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
- graph_builder.CreateGraph();
+ graph_builder.CreateGraph(false);
Inlinee::UnifyReturn(&jsgraph);
CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
@@ -407,83 +380,17 @@ void JSInliner::TryInlineJSCall(Node* call_node) {
CreateArgumentsAdaptorFrameState(&call, function, info.zone());
}
- for (NodeVectorConstIter it = visitor.copies().begin();
- it != visitor.copies().end(); ++it) {
- Node* node = *it;
- if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
+ for (Node* node : visitor.copies()) {
+ if (node && node->opcode() == IrOpcode::kFrameState) {
AddClosureToFrameState(node, function);
NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
}
}
}
- inlinee.InlineAtCall(jsgraph_, call_node);
+ return inlinee.InlineAtCall(jsgraph_, call_node);
}
-
-class JSCallRuntimeAccessor {
- public:
- explicit JSCallRuntimeAccessor(Node* call) : call_(call) {
- DCHECK_EQ(IrOpcode::kJSCallRuntime, call->opcode());
- }
-
- Node* formal_argument(size_t index) {
- DCHECK(index < formal_arguments());
- return call_->InputAt(static_cast<int>(index));
- }
-
- size_t formal_arguments() {
- size_t value_inputs = call_->op()->ValueInputCount();
- return value_inputs;
- }
-
- Node* frame_state() const {
- return NodeProperties::GetFrameStateInput(call_);
- }
- Node* context() const { return NodeProperties::GetContextInput(call_); }
- Node* control() const { return NodeProperties::GetControlInput(call_); }
- Node* effect() const { return NodeProperties::GetEffectInput(call_); }
-
- const Runtime::Function* function() const {
- return Runtime::FunctionForId(CallRuntimeParametersOf(call_->op()).id());
- }
-
- NodeVector inputs(Zone* zone) const {
- NodeVector inputs(zone);
- for (Node* const node : call_->inputs()) {
- inputs.push_back(node);
- }
- return inputs;
- }
-
- private:
- Node* call_;
-};
-
-
-void JSInliner::TryInlineRuntimeCall(Node* call_node) {
- JSCallRuntimeAccessor call(call_node);
- const Runtime::Function* f = call.function();
-
- if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) {
- return;
- }
-
- JSIntrinsicBuilder intrinsic_builder(jsgraph_);
-
- ResultAndEffect r = intrinsic_builder.BuildGraphFor(
- f->function_id, call.inputs(jsgraph_->zone()));
-
- if (r.first != NULL) {
- if (FLAG_trace_turbo_inlining) {
- PrintF("Inlining %s into %s\n", f->name,
- info_->shared_info()->DebugName()->ToCString().get());
- }
- NodeProperties::ReplaceWithValue(call_node, r.first, r.second);
- call_node->RemoveAllInputs();
- DCHECK_EQ(0, call_node->UseCount());
- }
-}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index eef29d6a74..8a4e0c1780 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_JS_INLINING_H_
#include "src/compiler/js-graph.h"
-#include "src/v8.h"
+#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
@@ -14,14 +14,12 @@ namespace compiler {
class JSCallFunctionAccessor;
-class JSInliner {
+class JSInliner FINAL : public Reducer {
public:
JSInliner(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph)
: local_zone_(local_zone), info_(info), jsgraph_(jsgraph) {}
- void Inline();
- void TryInlineJSCall(Node* node);
- void TryInlineRuntimeCall(Node* node);
+ Reduction Reduce(Node* node) OVERRIDE;
private:
friend class InlinerVisitor;
@@ -33,6 +31,7 @@ class JSInliner {
Handle<JSFunction> jsfunction,
Zone* temp_zone);
void AddClosureToFrameState(Node* frame_state, Handle<JSFunction> jsfunction);
+ Reduction TryInlineJSCall(Node* node, Handle<JSFunction> jsfunction);
static void UnifyReturn(Graph* graph);
};
}
diff --git a/deps/v8/src/compiler/js-intrinsic-builder.cc b/deps/v8/src/compiler/js-intrinsic-builder.cc
deleted file mode 100644
index 80b69682ca..0000000000
--- a/deps/v8/src/compiler/js-intrinsic-builder.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/diamond.h"
-#include "src/compiler/js-intrinsic-builder.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/simplified-operator.h"
-
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor(Runtime::FunctionId id,
- const NodeVector& arguments) {
- switch (id) {
- case Runtime::kInlineIsSmi:
- return BuildGraphFor_IsSmi(arguments);
- case Runtime::kInlineIsNonNegativeSmi:
- return BuildGraphFor_IsNonNegativeSmi(arguments);
- case Runtime::kInlineIsArray:
- return BuildMapCheck(arguments[0], arguments[2], JS_ARRAY_TYPE);
- case Runtime::kInlineIsRegExp:
- return BuildMapCheck(arguments[0], arguments[2], JS_REGEXP_TYPE);
- case Runtime::kInlineIsFunction:
- return BuildMapCheck(arguments[0], arguments[2], JS_FUNCTION_TYPE);
- case Runtime::kInlineValueOf:
- return BuildGraphFor_ValueOf(arguments);
- default:
- break;
- }
- return ResultAndEffect();
-}
-
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_IsSmi(
- const NodeVector& arguments) {
- Node* object = arguments[0];
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
- Node* condition = graph()->NewNode(simplified.ObjectIsSmi(), object);
-
- return ResultAndEffect(condition, arguments[2]);
-}
-
-
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_IsNonNegativeSmi(
- const NodeVector& arguments) {
- Node* object = arguments[0];
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
- Node* condition =
- graph()->NewNode(simplified.ObjectIsNonNegativeSmi(), object);
-
- return ResultAndEffect(condition, arguments[2]);
-}
-
-
-/*
- * if (_isSmi(object)) {
- * return false
- * } else {
- * return %_GetMapInstanceType(object) == map_type
- * }
- */
-ResultAndEffect JSIntrinsicBuilder::BuildMapCheck(Node* object, Node* effect,
- InstanceType map_type) {
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
-
- Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
- Diamond d(graph(), common(), is_smi);
-
- Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
- object, effect, d.if_false);
-
- Node* instance_type = graph()->NewNode(
- simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
- d.if_false);
-
- Node* has_map_type =
- graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
- jsgraph_->Int32Constant(map_type));
-
- Node* phi = d.Phi(static_cast<MachineType>(kTypeBool | kRepTagged),
- jsgraph_->FalseConstant(), has_map_type);
-
- Node* ephi = d.EffectPhi(effect, instance_type);
-
- return ResultAndEffect(phi, ephi);
-}
-
-
-/*
- * if (%_isSmi(object)) {
- * return object;
- * } else if (%_GetMapInstanceType(object) == JS_VALUE_TYPE) {
- * return %_LoadValueField(object);
- * } else {
- * return object;
- * }
- */
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_ValueOf(
- const NodeVector& arguments) {
- Node* object = arguments[0];
- Node* effect = arguments[2];
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
-
- Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
-
- Diamond if_is_smi(graph(), common(), is_smi);
-
- Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
- object, effect, if_is_smi.if_false);
-
- Node* instance_type = graph()->NewNode(
- simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
- if_is_smi.if_false);
-
- Node* is_value =
- graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
- jsgraph_->Constant(JS_VALUE_TYPE));
-
- Diamond if_is_value(graph(), common(), is_value);
- if_is_value.Nest(if_is_smi, false);
-
- Node* value =
- graph()->NewNode(simplified.LoadField(AccessBuilder::ForValue()), object,
- instance_type, if_is_value.if_true);
-
- Node* phi_is_value = if_is_value.Phi(kTypeAny, value, object);
-
- Node* phi = if_is_smi.Phi(kTypeAny, object, phi_is_value);
-
- Node* ephi = if_is_smi.EffectPhi(effect, instance_type);
-
- return ResultAndEffect(phi, ephi);
-}
-}
-}
-} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/js-intrinsic-builder.h b/deps/v8/src/compiler/js-intrinsic-builder.h
deleted file mode 100644
index 9336be6559..0000000000
--- a/deps/v8/src/compiler/js-intrinsic-builder.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_INTRINSIC_BUILDER_H_
-#define V8_COMPILER_JS_INTRINSIC_BUILDER_H_
-
-#include "src/compiler/js-graph.h"
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-typedef std::pair<Node*, Node*> ResultAndEffect;
-
-class JSIntrinsicBuilder {
- public:
- explicit JSIntrinsicBuilder(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
-
- ResultAndEffect BuildGraphFor(Runtime::FunctionId id,
- const NodeVector& arguments);
-
- private:
- ResultAndEffect BuildMapCheck(Node* object, Node* effect,
- InstanceType map_type);
- ResultAndEffect BuildGraphFor_IsSmi(const NodeVector& arguments);
- ResultAndEffect BuildGraphFor_IsNonNegativeSmi(const NodeVector& arguments);
- ResultAndEffect BuildGraphFor_ValueOf(const NodeVector& arguments);
-
-
- Graph* graph() const { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() const { return jsgraph_->common(); }
- JSGraph* jsgraph_;
-};
-}
-}
-} // namespace v8::internal::compiler
-
-#endif // V8_COMPILER_JS_INTRINSIC_BUILDER_H_
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
new file mode 100644
index 0000000000..a1e693585b
--- /dev/null
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -0,0 +1,194 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-intrinsic-lowering.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSIntrinsicLowering::JSIntrinsicLowering(JSGraph* jsgraph)
+ : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+
+
+Reduction JSIntrinsicLowering::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
+ const Runtime::Function* const f =
+ Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
+ if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
+ switch (f->function_id) {
+ case Runtime::kInlineIsSmi:
+ return ReduceInlineIsSmi(node);
+ case Runtime::kInlineIsNonNegativeSmi:
+ return ReduceInlineIsNonNegativeSmi(node);
+ case Runtime::kInlineIsArray:
+ return ReduceInlineIsInstanceType(node, JS_ARRAY_TYPE);
+ case Runtime::kInlineIsFunction:
+ return ReduceInlineIsInstanceType(node, JS_FUNCTION_TYPE);
+ case Runtime::kInlineIsRegExp:
+ return ReduceInlineIsInstanceType(node, JS_REGEXP_TYPE);
+ case Runtime::kInlineValueOf:
+ return ReduceInlineValueOf(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceInlineIsSmi(Node* node) {
+ return Change(node, simplified()->ObjectIsSmi());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceInlineIsNonNegativeSmi(Node* node) {
+ return Change(node, simplified()->ObjectIsNonNegativeSmi());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceInlineIsInstanceType(
+ Node* node, InstanceType instance_type) {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return %_GetInstanceType(%_GetMap(value)) == instance_type;
+ // }
+ MachineType const type = static_cast<MachineType>(kTypeBool | kRepTagged);
+
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), value,
+ effect, if_false),
+ effect, if_false);
+ Node* vfalse = graph()->NewNode(machine()->Word32Equal(), efalse,
+ jsgraph()->Int32Constant(instance_type));
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+ // Replace all effect uses of {node} with the {ephi}.
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+ NodeProperties::ReplaceWithValue(node, node, ephi);
+
+ // Turn the {node} into a Phi.
+ return Change(node, common()->Phi(type, 2), vtrue, vfalse, merge);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceInlineValueOf(Node* node) {
+ // if (%_IsSmi(value)) {
+ // return value;
+ // } else if (%_GetInstanceType(%_GetMap(value)) == JS_VALUE_TYPE) {
+ // return %_GetValue(value);
+ // } else {
+ // return value;
+ // }
+ const Operator* const merge_op = common()->Merge(2);
+ const Operator* const ephi_op = common()->EffectPhi(2);
+ const Operator* const phi_op = common()->Phi(kMachAnyTagged, 2);
+
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = value;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0;
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false0),
+ effect, if_false0),
+ jsgraph()->Int32Constant(JS_VALUE_TYPE));
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForValue()),
+ value, effect, if_true1);
+ Node* vtrue1 = etrue1;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = effect;
+ Node* vfalse1 = value;
+
+ Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
+ efalse0 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
+ vfalse0 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
+ }
+
+ Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+
+
+ // Replace all effect uses of {node} with the {ephi0}.
+ Node* ephi0 = graph()->NewNode(ephi_op, etrue0, efalse0, merge0);
+ NodeProperties::ReplaceWithValue(node, node, ephi0);
+
+ // Turn the {node} into a Phi.
+ return Change(node, phi_op, vtrue0, vfalse0, merge0);
+}
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
+ // Remove the effects from the node and update its effect usages.
+ NodeProperties::ReplaceWithValue(node, node);
+ // Remove the inputs corresponding to context, effect and control.
+ NodeProperties::RemoveNonValueInputs(node);
+ // Finally update the operator to the new one.
+ node->set_op(op);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
+ Node* b, Node* c) {
+ node->set_op(op);
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->ReplaceInput(2, c);
+ node->TrimInputCount(3);
+ return Changed(node);
+}
+
+
+Graph* JSIntrinsicLowering::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* JSIntrinsicLowering::common() const {
+ return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* JSIntrinsicLowering::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
new file mode 100644
index 0000000000..bc188caa06
--- /dev/null
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -0,0 +1,52 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INTRINSIC_LOWERING_H_
+#define V8_COMPILER_JS_INTRINSIC_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class MachineOperatorBuilder;
+
+
+// Lowers certain JS-level runtime calls.
+class JSIntrinsicLowering FINAL : public Reducer {
+ public:
+ explicit JSIntrinsicLowering(JSGraph* jsgraph);
+ ~JSIntrinsicLowering() FINAL {}
+
+ Reduction Reduce(Node* node) FINAL;
+
+ private:
+ Reduction ReduceInlineIsSmi(Node* node);
+ Reduction ReduceInlineIsNonNegativeSmi(Node* node);
+ Reduction ReduceInlineIsInstanceType(Node* node, InstanceType instance_type);
+ Reduction ReduceInlineValueOf(Node* node);
+
+ Reduction Change(Node* node, const Operator* op);
+ Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ JSGraph* jsgraph_;
+ SimplifiedOperatorBuilder simplified_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_INTRINSIC_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index aa76a3b6f5..72c39697b4 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -182,7 +182,7 @@ const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
bool operator==(StoreNamedParameters const& lhs,
StoreNamedParameters const& rhs) {
- return lhs.strict_mode() == rhs.strict_mode() && lhs.name() == rhs.name();
+ return lhs.language_mode() == rhs.language_mode() && lhs.name() == rhs.name();
}
@@ -193,12 +193,12 @@ bool operator!=(StoreNamedParameters const& lhs,
size_t hash_value(StoreNamedParameters const& p) {
- return base::hash_combine(p.strict_mode(), p.name());
+ return base::hash_combine(p.language_mode(), p.name());
}
std::ostream& operator<<(std::ostream& os, StoreNamedParameters const& p) {
- return os << p.strict_mode() << ", " << Brief(*p.name().handle());
+ return os << p.language_mode() << ", " << Brief(*p.name().handle());
}
@@ -260,12 +260,12 @@ struct JSOperatorGlobalCache FINAL {
CACHED_OP_LIST(CACHED)
#undef CACHED
- template <StrictMode kStrictMode>
- struct StorePropertyOperator FINAL : public Operator1<StrictMode> {
+ template <LanguageMode kLanguageMode>
+ struct StorePropertyOperator FINAL : public Operator1<LanguageMode> {
StorePropertyOperator()
- : Operator1<StrictMode>(IrOpcode::kJSStoreProperty,
- Operator::kNoProperties, "JSStoreProperty", 3,
- 1, 1, 0, 1, 0, kStrictMode) {}
+ : Operator1<LanguageMode>(IrOpcode::kJSStoreProperty,
+ Operator::kNoProperties, "JSStoreProperty", 3,
+ 1, 1, 0, 1, 0, kLanguageMode) {}
};
StorePropertyOperator<SLOPPY> kStorePropertySloppyOperator;
StorePropertyOperator<STRICT> kStorePropertyStrictOperator;
@@ -344,21 +344,20 @@ const Operator* JSOperatorBuilder::LoadProperty(
}
-const Operator* JSOperatorBuilder::StoreProperty(StrictMode strict_mode) {
- switch (strict_mode) {
- case SLOPPY:
- return &cache_.kStorePropertySloppyOperator;
- case STRICT:
- return &cache_.kStorePropertyStrictOperator;
+const Operator* JSOperatorBuilder::StoreProperty(LanguageMode language_mode) {
+ if (is_strict(language_mode)) {
+ return &cache_.kStorePropertyStrictOperator;
+ } else {
+ return &cache_.kStorePropertySloppyOperator;
}
UNREACHABLE();
return nullptr;
}
-const Operator* JSOperatorBuilder::StoreNamed(StrictMode strict_mode,
+const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
const Unique<Name>& name) {
- StoreNamedParameters parameters(strict_mode, name);
+ StoreNamedParameters parameters(language_mode, name);
return new (zone()) Operator1<StoreNamedParameters>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
@@ -367,12 +366,12 @@ const Operator* JSOperatorBuilder::StoreNamed(StrictMode strict_mode,
}
-const Operator* JSOperatorBuilder::DeleteProperty(StrictMode strict_mode) {
- return new (zone()) Operator1<StrictMode>( // --
+const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
+ return new (zone()) Operator1<LanguageMode>( // --
IrOpcode::kJSDeleteProperty, Operator::kNoProperties, // opcode
"JSDeleteProperty", // name
2, 1, 1, 1, 1, 0, // counts
- strict_mode); // parameter
+ language_mode); // parameter
}
@@ -402,7 +401,7 @@ const Operator* JSOperatorBuilder::CreateCatchContext(
return new (zone()) Operator1<Unique<String>>( // --
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
- 1, 1, 1, 1, 1, 0, // counts
+ 2, 1, 1, 1, 1, 0, // counts
name); // parameter
}
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index e716a8eeaa..e7fc04c1e1 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -171,14 +171,14 @@ const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
// used as a parameter by JSStoreNamed operators.
class StoreNamedParameters FINAL {
public:
- StoreNamedParameters(StrictMode strict_mode, const Unique<Name>& name)
- : strict_mode_(strict_mode), name_(name) {}
+ StoreNamedParameters(LanguageMode language_mode, const Unique<Name>& name)
+ : language_mode_(language_mode), name_(name) {}
- StrictMode strict_mode() const { return strict_mode_; }
+ LanguageMode language_mode() const { return language_mode_; }
const Unique<Name>& name() const { return name_; }
private:
- const StrictMode strict_mode_;
+ const LanguageMode language_mode_;
const Unique<Name> name_;
};
@@ -239,10 +239,11 @@ class JSOperatorBuilder FINAL : public ZoneObject {
const VectorSlotPair& feedback,
ContextualMode contextual_mode = NOT_CONTEXTUAL);
- const Operator* StoreProperty(StrictMode strict_mode);
- const Operator* StoreNamed(StrictMode strict_mode, const Unique<Name>& name);
+ const Operator* StoreProperty(LanguageMode language_mode);
+ const Operator* StoreNamed(LanguageMode language_mode,
+ const Unique<Name>& name);
- const Operator* DeleteProperty(StrictMode strict_mode);
+ const Operator* DeleteProperty(LanguageMode language_mode);
const Operator* HasProperty();
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 761837576b..bbe46fb029 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -3,12 +3,11 @@
// found in the LICENSE file.
#include "src/compiler/access-builder.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-typed-lowering.h"
-#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
#include "src/types.h"
namespace v8 {
@@ -31,26 +30,23 @@ static void RelaxEffects(Node* node) {
JSTypedLowering::JSTypedLowering(JSGraph* jsgraph, Zone* zone)
: jsgraph_(jsgraph), simplified_(graph()->zone()), conversions_(zone) {
- Handle<Object> zero = factory()->NewNumber(0.0);
- Handle<Object> one = factory()->NewNumber(1.0);
- zero_range_ = Type::Range(zero, zero, graph()->zone());
- one_range_ = Type::Range(one, one, graph()->zone());
- Handle<Object> thirtyone = factory()->NewNumber(31.0);
- zero_thirtyone_range_ = Type::Range(zero, thirtyone, graph()->zone());
+ zero_range_ = Type::Range(0.0, 1.0, graph()->zone());
+ one_range_ = Type::Range(1.0, 1.0, graph()->zone());
+ zero_thirtyone_range_ = Type::Range(0.0, 31.0, graph()->zone());
// TODO(jarin): Can we have a correctification of the stupid type system?
// These stupid work-arounds are just stupid!
shifted_int32_ranges_[0] = Type::Signed32();
if (SmiValuesAre31Bits()) {
shifted_int32_ranges_[1] = Type::SignedSmall();
for (size_t k = 2; k < arraysize(shifted_int32_ranges_); ++k) {
- Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
- Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
+ double min = kMinInt / (1 << k);
+ double max = kMaxInt / (1 << k);
shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
}
} else {
for (size_t k = 1; k < arraysize(shifted_int32_ranges_); ++k) {
- Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
- Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
+ double min = kMinInt / (1 << k);
+ double max = kMaxInt / (1 << k);
shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
}
}
@@ -198,8 +194,16 @@ class JSBinopReduction FINAL {
if (NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive())) {
return lowering_->ConvertToNumber(node);
}
- Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
- effect(), control());
+ // TODO(jarin) This ToNumber conversion can deoptimize, but we do not really
+ // have a frame state to deoptimize to. Either we provide such a frame state
+ // or we exclude the values that could lead to deoptimization (e.g., by
+ // triggering eager deopt if the value is not plain).
+ Node* const n = FLAG_turbo_deoptimization
+ ? graph()->NewNode(
+ javascript()->ToNumber(), node, context(),
+ jsgraph()->EmptyFrameState(), effect(), control())
+ : graph()->NewNode(javascript()->ToNumber(), node,
+ context(), effect(), control());
update_effect(n);
return n;
}
@@ -267,13 +271,15 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Reduction JSTypedLowering::ReduceJSBitwiseOr(Node* node) {
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive()) || r.OneInputIs(zero_range_)) {
- // TODO(jarin): Propagate frame state input from non-primitive input node to
- // JSToNumber node.
+
+ // We can only reduce to Word32Or if we are sure the to-number conversions
+ // cannot lazily deoptimize.
+ bool shortcut_or_zero =
+ !FLAG_turbo_deoptimization && r.OneInputIs(zero_range_);
+ if (r.BothInputsAre(Type::Primitive()) || shortcut_or_zero) {
// TODO(titzer): some Smi bitwise operations don't really require going
// all the way to int32, which can save tagging/untagging for some
- // operations
- // on some platforms.
+ // operations on some platforms.
// TODO(turbofan): make this heuristic configurable for code size.
r.ConvertInputsToUI32(kSigned, kSigned);
return r.ChangeToPureOperator(machine()->Word32Or(), Type::Integral32());
@@ -284,9 +290,13 @@ Reduction JSTypedLowering::ReduceJSBitwiseOr(Node* node) {
Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive()) || r.OneInputIs(one_range_)) {
- // TODO(jarin): Propagate frame state input from non-primitive input node to
- // JSToNumber node.
+
+ // We can only reduce to NumberMultiply if we are sure the to-number
+ // conversions cannot lazily deoptimize.
+ bool shortcut_multiply_one =
+ !FLAG_turbo_deoptimization && r.OneInputIs(one_range_);
+
+ if (r.BothInputsAre(Type::Primitive()) || shortcut_multiply_one) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(simplified()->NumberMultiply(),
Type::Number());
@@ -624,15 +634,20 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
}
// Remember this conversion.
InsertConversion(node);
- if (node->InputAt(1) != jsgraph()->NoContextConstant() ||
- node->InputAt(2) != graph()->start() ||
- node->InputAt(3) != graph()->start()) {
+ if (NodeProperties::GetContextInput(node) !=
+ jsgraph()->NoContextConstant() ||
+ NodeProperties::GetEffectInput(node) != graph()->start() ||
+ NodeProperties::GetControlInput(node) != graph()->start()) {
// JSToNumber(x:plain-primitive,context,effect,control)
// => JSToNumber(x,no-context,start,start)
RelaxEffects(node);
- node->ReplaceInput(1, jsgraph()->NoContextConstant());
- node->ReplaceInput(2, graph()->start());
- node->ReplaceInput(3, graph()->start());
+ NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
+ NodeProperties::ReplaceControlInput(node, graph()->start());
+ NodeProperties::ReplaceEffectInput(node, graph()->start());
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
+ NodeProperties::ReplaceFrameStateInput(node,
+ jsgraph()->EmptyFrameState());
+ }
return Changed(node);
}
}
@@ -752,8 +767,15 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
if (number_reduction.Changed()) {
value = number_reduction.replacement();
} else {
- value = effect = graph()->NewNode(javascript()->ToNumber(), value,
- context, effect, control);
+ if (OperatorProperties::HasFrameStateInput(
+ javascript()->ToNumber())) {
+ value = effect =
+ graph()->NewNode(javascript()->ToNumber(), value, context,
+ jsgraph()->EmptyFrameState(), effect, control);
+ } else {
+ value = effect = graph()->NewNode(javascript()->ToNumber(), value,
+ context, effect, control);
+ }
}
}
// For integer-typed arrays, convert to the integer type.
@@ -785,8 +807,8 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
node->ReplaceInput(2, length);
node->ReplaceInput(3, value);
node->ReplaceInput(4, effect);
- DCHECK_EQ(control, node->InputAt(5));
- DCHECK_EQ(6, node->InputCount());
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
return Changed(node);
}
}
@@ -838,8 +860,7 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
Reduction JSTypedLowering::Reduce(Node* node) {
// Check if the output type is a singleton. In that case we already know the
// result value and can simply replace the node if it's eliminable.
- if (NodeProperties::IsTyped(node) &&
- !IrOpcode::IsLeafOpcode(node->opcode()) &&
+ if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable)) {
Type* upper = NodeProperties::GetBounds(node).upper;
if (upper->IsConstant()) {
@@ -932,9 +953,16 @@ Node* JSTypedLowering::ConvertToNumber(Node* input) {
// Avoid inserting too many eager ToNumber() operations.
Reduction const reduction = ReduceJSToNumberInput(input);
if (reduction.Changed()) return reduction.replacement();
- Node* const conversion = graph()->NewNode(javascript()->ToNumber(), input,
- jsgraph()->NoContextConstant(),
- graph()->start(), graph()->start());
+ // TODO(jarin) Use PlainPrimitiveToNumber once we have it.
+ Node* const conversion =
+ FLAG_turbo_deoptimization
+ ? graph()->NewNode(javascript()->ToNumber(), input,
+ jsgraph()->NoContextConstant(),
+ jsgraph()->EmptyFrameState(), graph()->start(),
+ graph()->start())
+ : graph()->NewNode(javascript()->ToNumber(), input,
+ jsgraph()->NoContextConstant(), graph()->start(),
+ graph()->start());
InsertConversion(conversion);
return conversion;
}
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index f0bb731605..4242c957ed 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -140,7 +140,7 @@ void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
InstructionSequence* code) {
if (!FLAG_turbo_jt) return;
- Zone local_zone(code->zone()->isolate());
+ Zone local_zone;
ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
// Skip empty blocks when the previous block doesn't fall through.
diff --git a/deps/v8/src/compiler/linkage-impl.h b/deps/v8/src/compiler/linkage-impl.h
index c13bd74f40..abd0696837 100644
--- a/deps/v8/src/compiler/linkage-impl.h
+++ b/deps/v8/src/compiler/linkage-impl.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_LINKAGE_IMPL_H_
#include "src/code-stubs.h"
+#include "src/compiler/osr.h"
namespace v8 {
namespace internal {
@@ -28,7 +29,8 @@ class LinkageHelper {
}
// TODO(turbofan): cache call descriptors for JSFunction calls.
- static CallDescriptor* GetJSCallDescriptor(Zone* zone, int js_parameter_count,
+ static CallDescriptor* GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int js_parameter_count,
CallDescriptor::Flags flags) {
const size_t return_count = 1;
const size_t context_count = 1;
@@ -55,7 +57,12 @@ class LinkageHelper {
// The target for JS function calls is the JSFunction object.
MachineType target_type = kMachAnyTagged;
- LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
+ // Unoptimized code doesn't preserve the JSCallFunctionReg, so expect the
+ // closure on the stack.
+ LinkageLocation target_loc =
+ is_osr ? stackloc(Linkage::kJSFunctionCallClosureParamIndex -
+ js_parameter_count)
+ : regloc(LinkageTraits::JSCallFunctionReg());
return new (zone) CallDescriptor( // --
CallDescriptor::kCallJSFunction, // kind
target_type, // target MachineType
@@ -133,7 +140,7 @@ class LinkageHelper {
// TODO(turbofan): cache call descriptors for code stub calls.
static CallDescriptor* GetStubCallDescriptor(
- Zone* zone, const CallInterfaceDescriptor& descriptor,
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties) {
const int register_parameter_count =
@@ -182,11 +189,11 @@ class LinkageHelper {
properties, // properties
kNoCalleeSaved, // callee-saved registers
flags, // flags
- descriptor.DebugName(zone->isolate()));
+ descriptor.DebugName(isolate));
}
- static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* msig) {
+ static CallDescriptor* GetSimplifiedCDescriptor(
+ Zone* zone, const MachineSignature* msig) {
LocationSignature::Builder locations(zone, msig->return_count(),
msig->parameter_count());
// Add return location(s).
@@ -226,6 +233,28 @@ class LinkageHelper {
return LinkageLocation(i);
}
};
+
+
+LinkageLocation Linkage::GetOsrValueLocation(int index) const {
+ CHECK(incoming_->IsJSFunctionCall());
+ int parameter_count = static_cast<int>(incoming_->JSParameterCount() - 1);
+ int first_stack_slot = OsrHelper::FirstStackSlotIndex(parameter_count);
+
+ if (index >= first_stack_slot) {
+ // Local variable stored in this (callee) stack.
+ int spill_index =
+ LinkageLocation::ANY_REGISTER + 1 + index - first_stack_slot;
+ // TODO(titzer): bailout instead of crashing here.
+ CHECK(spill_index <= LinkageLocation::MAX_STACK_SLOT);
+ return LinkageLocation(spill_index);
+ } else {
+ // Parameter. Use the assigned location from the incoming call descriptor.
+ int parameter_index = 1 + index; // skip index 0, which is the target.
+ return incoming_->GetInputLocation(parameter_index);
+ }
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index fc6b19e714..eedf9ed746 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -42,22 +42,25 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
if (info->function() != NULL) {
// If we already have the function literal, use the number of parameters
// plus the receiver.
- return GetJSCallDescriptor(1 + info->function()->parameter_count(), zone,
+ return GetJSCallDescriptor(zone, info->is_osr(),
+ 1 + info->function()->parameter_count(),
CallDescriptor::kNoFlags);
}
if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
SharedFunctionInfo* shared = info->closure()->shared();
- return GetJSCallDescriptor(1 + shared->formal_parameter_count(), zone,
+ return GetJSCallDescriptor(zone, info->is_osr(),
+ 1 + shared->internal_formal_parameter_count(),
CallDescriptor::kNoFlags);
}
if (info->code_stub() != NULL) {
// Use the code stub interface descriptor.
CallInterfaceDescriptor descriptor =
info->code_stub()->GetCallInterfaceDescriptor();
- return GetStubCallDescriptor(descriptor, 0, CallDescriptor::kNoFlags,
- Operator::kNoProperties, zone);
+ return GetStubCallDescriptor(info->isolate(), zone, descriptor, 0,
+ CallDescriptor::kNoFlags,
+ Operator::kNoProperties);
}
return NULL; // TODO(titzer): ?
}
@@ -91,131 +94,54 @@ FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
}
-CallDescriptor* Linkage::GetJSCallDescriptor(
- int parameter_count, CallDescriptor::Flags flags) const {
- return GetJSCallDescriptor(parameter_count, zone_, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) const {
- return GetRuntimeCallDescriptor(function, parameter_count, properties, zone_);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties) const {
- return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
- properties, zone_);
-}
-
-
// static
bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
if (!FLAG_turbo_deoptimization) {
return false;
}
- // TODO(jarin) At the moment, we only add frame state for
- // few chosen runtime functions.
+
+ // Most runtime functions need a FrameState. A few chosen ones that we know
+ // not to call into arbitrary JavaScript, not to throw, and not to deoptimize
+ // are blacklisted here and can be called without a FrameState.
switch (function) {
- case Runtime::kApply:
- case Runtime::kArrayBufferNeuter:
- case Runtime::kArrayConcat:
- case Runtime::kBasicJSONStringify:
- case Runtime::kCheckExecutionState:
- case Runtime::kCollectStackTrace:
- case Runtime::kCompileLazy:
- case Runtime::kCompileOptimized:
- case Runtime::kCompileString:
- case Runtime::kCreateObjectLiteral:
- case Runtime::kDebugBreak:
- case Runtime::kDataViewSetInt8:
- case Runtime::kDataViewSetUint8:
- case Runtime::kDataViewSetInt16:
- case Runtime::kDataViewSetUint16:
- case Runtime::kDataViewSetInt32:
- case Runtime::kDataViewSetUint32:
- case Runtime::kDataViewSetFloat32:
- case Runtime::kDataViewSetFloat64:
- case Runtime::kDataViewGetInt8:
- case Runtime::kDataViewGetUint8:
- case Runtime::kDataViewGetInt16:
- case Runtime::kDataViewGetUint16:
- case Runtime::kDataViewGetInt32:
- case Runtime::kDataViewGetUint32:
- case Runtime::kDataViewGetFloat32:
- case Runtime::kDataViewGetFloat64:
- case Runtime::kDebugEvaluate:
- case Runtime::kDebugEvaluateGlobal:
- case Runtime::kDebugGetLoadedScripts:
- case Runtime::kDebugGetPropertyDetails:
- case Runtime::kDebugPromiseEvent:
- case Runtime::kDefineAccessorPropertyUnchecked:
- case Runtime::kDefineDataPropertyUnchecked:
- case Runtime::kDeleteProperty:
- case Runtime::kDeoptimizeFunction:
- case Runtime::kFunctionBindArguments:
- case Runtime::kGetDefaultReceiver:
- case Runtime::kGetFrameCount:
- case Runtime::kGetOwnProperty:
- case Runtime::kGetOwnPropertyNames:
- case Runtime::kGetPropertyNamesFast:
- case Runtime::kGetPrototype:
+ case Runtime::kDeclareGlobals: // TODO(jarin): Is it safe?
+ case Runtime::kDefineClassMethod: // TODO(jarin): Is it safe?
+ case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
+ case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
+ case Runtime::kForInCacheArrayLength:
+ case Runtime::kForInInit:
+ case Runtime::kForInNext:
+ case Runtime::kNewArguments:
+ case Runtime::kNewClosure:
+ case Runtime::kNewFunctionContext:
+ case Runtime::kPushBlockContext:
+ case Runtime::kPushCatchContext:
+ case Runtime::kReThrow:
+ case Runtime::kSetProperty: // TODO(jarin): Is it safe?
+ case Runtime::kStringCompare:
+ case Runtime::kStringEquals:
+ case Runtime::kToFastProperties: // TODO(jarin): Is it safe?
+ case Runtime::kTraceEnter:
+ case Runtime::kTraceExit:
+ case Runtime::kTypeof:
+ case Runtime::kNewRestParamSlow:
+ return false;
case Runtime::kInlineArguments:
case Runtime::kInlineCallFunction:
case Runtime::kInlineDateField:
+ case Runtime::kInlineOptimizedGetPrototype:
case Runtime::kInlineRegExpExec:
- case Runtime::kInternalSetPrototype:
- case Runtime::kInterrupt:
- case Runtime::kIsPropertyEnumerable:
- case Runtime::kIsSloppyModeFunction:
- case Runtime::kLiveEditGatherCompileInfo:
- case Runtime::kLoadLookupSlot:
- case Runtime::kLoadLookupSlotNoReferenceError:
- case Runtime::kMaterializeRegExpLiteral:
- case Runtime::kNewObject:
- case Runtime::kNewObjectFromBound:
- case Runtime::kNewObjectWithAllocationSite:
- case Runtime::kObjectFreeze:
- case Runtime::kOwnKeys:
- case Runtime::kParseJson:
- case Runtime::kPrepareStep:
- case Runtime::kPreventExtensions:
- case Runtime::kPromiseRejectEvent:
- case Runtime::kPromiseRevokeReject:
- case Runtime::kRegExpInitializeAndCompile:
- case Runtime::kRegExpExecMultiple:
- case Runtime::kResolvePossiblyDirectEval:
- case Runtime::kRunMicrotasks:
- case Runtime::kSetPrototype:
- case Runtime::kSetScriptBreakPoint:
- case Runtime::kSparseJoinWithSeparator:
- case Runtime::kStackGuard:
- case Runtime::kStoreKeyedToSuper_Sloppy:
- case Runtime::kStoreKeyedToSuper_Strict:
- case Runtime::kStoreToSuper_Sloppy:
- case Runtime::kStoreToSuper_Strict:
- case Runtime::kStoreLookupSlot:
- case Runtime::kStringBuilderConcat:
- case Runtime::kStringBuilderJoin:
- case Runtime::kStringMatch:
- case Runtime::kStringReplaceGlobalRegExpWithString:
- case Runtime::kThrowNonMethodError:
- case Runtime::kThrowNotDateError:
- case Runtime::kThrowReferenceError:
- case Runtime::kThrowUnsupportedSuperError:
- case Runtime::kThrow:
- case Runtime::kTypedArraySetFastCases:
- case Runtime::kTypedArrayInitializeFromArrayLike:
-#ifdef V8_I18N_SUPPORT
- case Runtime::kGetImplFromInitializedIntlObject:
-#endif
return true;
default:
- return false;
+ break;
}
+
+ // Most inlined runtime functions (except the ones listed above) can be called
+ // without a FrameState or will be lowered by JSIntrinsicLowering internally.
+ const Runtime::Function* const f = Runtime::FunctionForId(function);
+ if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return false;
+
+ return true;
}
@@ -223,32 +149,39 @@ bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
// Provide unimplemented methods on unsupported architectures, to at least link.
//==============================================================================
#if !V8_TURBOFAN_BACKEND
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags) {
UNIMPLEMENTED();
return NULL;
}
+LinkageLocation Linkage::GetOsrValueLocation(int index) const {
+ UNIMPLEMENTED();
+ return LinkageLocation(-1); // Dummy value
+}
+
+
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
UNIMPLEMENTED();
return NULL;
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties,
- Zone* zone) {
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
UNIMPLEMENTED();
return NULL;
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
+ const MachineSignature* sig) {
UNIMPLEMENTED();
return NULL;
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 0ad0761a0a..a3bdbf9d5b 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -9,6 +9,8 @@
#include "src/compiler/frame.h"
#include "src/compiler/machine-type.h"
#include "src/compiler/operator.h"
+#include "src/frames.h"
+#include "src/runtime/runtime.h"
#include "src/zone.h"
namespace v8 {
@@ -18,19 +20,26 @@ class CallInterfaceDescriptor;
namespace compiler {
+class OsrHelper;
+
// Describes the location for a parameter or a return value to a call.
class LinkageLocation {
public:
explicit LinkageLocation(int location) : location_(location) {}
- static const int16_t ANY_REGISTER = 32767;
+ static const int16_t ANY_REGISTER = 1023;
+ static const int16_t MAX_STACK_SLOT = 32767;
static LinkageLocation AnyRegister() { return LinkageLocation(ANY_REGISTER); }
private:
friend class CallDescriptor;
friend class OperandGenerator;
- int16_t location_; // >= 0 implies register, otherwise stack slot.
+ // location < 0 -> a stack slot on the caller frame
+ // 0 <= location < 1023 -> a specific machine register
+ // 1023 <= location < 1024 -> any machine register
+ // 1024 <= location -> a stack slot in the callee frame
+ int16_t location_;
};
typedef Signature<LinkageLocation> LocationSignature;
@@ -57,8 +66,9 @@ class CallDescriptor FINAL : public ZoneObject {
typedef base::Flags<Flag> Flags;
CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
- MachineSignature* machine_sig, LocationSignature* location_sig,
- size_t js_param_count, Operator::Properties properties,
+ const MachineSignature* machine_sig,
+ LocationSignature* location_sig, size_t js_param_count,
+ Operator::Properties properties,
RegList callee_saved_registers, Flags flags,
const char* debug_name = "")
: kind_(kind),
@@ -164,41 +174,31 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
class Linkage : public ZoneObject {
public:
- Linkage(Zone* zone, CompilationInfo* info)
- : zone_(zone), incoming_(ComputeIncoming(zone, info)) {}
- Linkage(Zone* zone, CallDescriptor* incoming)
- : zone_(zone), incoming_(incoming) {}
+ explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
static CallDescriptor* ComputeIncoming(Zone* zone, CompilationInfo* info);
// The call descriptor for this compilation unit describes the locations
// of incoming parameters and the outgoing return value(s).
CallDescriptor* GetIncomingDescriptor() const { return incoming_; }
- CallDescriptor* GetJSCallDescriptor(int parameter_count,
- CallDescriptor::Flags flags) const;
- static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone,
+ static CallDescriptor* GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags);
- CallDescriptor* GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) const;
static CallDescriptor* GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone);
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties);
- CallDescriptor* GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count = 0,
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags,
- Operator::Properties properties = Operator::kNoProperties) const;
static CallDescriptor* GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties = Operator::kNoProperties);
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
// integers and pointers of one word size each, i.e. no floating point,
// structs, pointers to members, etc.
static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig);
+ const MachineSignature* sig);
// Get the location of an (incoming) parameter to this function.
LinkageLocation GetParameterLocation(int index) const {
@@ -227,8 +227,13 @@ class Linkage : public ZoneObject {
static bool NeedsFrameState(Runtime::FunctionId function);
+ // Get the location where an incoming OSR value is stored.
+ LinkageLocation GetOsrValueLocation(int index) const;
+
+ // A special parameter index for JSCalls that represents the closure.
+ static const int kJSFunctionCallClosureParamIndex = -1;
+
private:
- Zone* const zone_;
CallDescriptor* const incoming_;
DISALLOW_COPY_AND_ASSIGN(Linkage);
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index fe0714ebc1..b76b187d5d 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -4,7 +4,7 @@
#include "src/compiler/load-elimination.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index e1b703e22b..17e4fd40ae 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -1,61 +1,31 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/graph.h"
#include "src/compiler/loop-analysis.h"
+
+#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
-typedef uint32_t LoopMarks;
-
+#define OFFSET(x) ((x)&0x1f)
+#define BIT(x) (1u << OFFSET(x))
+#define INDEX(x) ((x) >> 5)
// TODO(titzer): don't assume entry edges have a particular index.
-// TODO(titzer): use a BitMatrix to generalize this algorithm.
-static const size_t kMaxLoops = 31;
static const int kAssumedLoopEntryIndex = 0; // assume loops are entered here.
-static const LoopMarks kVisited = 1; // loop #0 is reserved.
// Temporary information for each node during marking.
struct NodeInfo {
Node* node;
NodeInfo* next; // link in chaining loop members
- LoopMarks forward; // accumulated marks in the forward direction
- LoopMarks backward; // accumulated marks in the backward direction
- LoopMarks loop_mark; // loop mark for header nodes; encodes loop_num
-
- bool MarkBackward(LoopMarks bw) {
- LoopMarks prev = backward;
- LoopMarks next = backward | bw;
- backward = next;
- return prev != next;
- }
-
- bool MarkForward(LoopMarks fw) {
- LoopMarks prev = forward;
- LoopMarks next = forward | fw;
- forward = next;
- return prev != next;
- }
-
- bool IsInLoop(size_t loop_num) {
- DCHECK(loop_num > 0 && loop_num <= 31);
- return forward & backward & (1 << loop_num);
- }
-
- bool IsLoopHeader() { return loop_mark != 0; }
- bool IsInAnyLoop() { return (forward & backward) > kVisited; }
-
- bool IsInHeaderForLoop(size_t loop_num) {
- DCHECK(loop_num > 0);
- return loop_mark == (kVisited | (1 << loop_num));
- }
};
@@ -68,9 +38,6 @@ struct LoopInfo {
};
-static const NodeInfo kEmptyNodeInfo = {nullptr, nullptr, 0, 0, 0};
-
-
// Encapsulation of the loop finding algorithm.
// -----------------------------------------------------------------------------
// Conceptually, the contents of a loop are those nodes that are "between" the
@@ -88,13 +55,18 @@ static const NodeInfo kEmptyNodeInfo = {nullptr, nullptr, 0, 0, 0};
class LoopFinderImpl {
public:
LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone)
- : end_(graph->end()),
+ : zone_(zone),
+ end_(graph->end()),
queue_(zone),
queued_(graph, 2),
- info_(graph->NodeCount(), kEmptyNodeInfo, zone),
+ info_(graph->NodeCount(), {nullptr, nullptr}, zone),
loops_(zone),
+ loop_num_(graph->NodeCount(), -1, zone),
loop_tree_(loop_tree),
- loops_found_(0) {}
+ loops_found_(0),
+ width_(0),
+ backward_(nullptr),
+ forward_(nullptr) {}
void Run() {
PropagateBackward();
@@ -106,12 +78,15 @@ class LoopFinderImpl {
// Print out the results.
for (NodeInfo& ni : info_) {
if (ni.node == nullptr) continue;
- for (size_t i = 1; i <= loops_.size(); i++) {
- if (ni.IsInLoop(i)) {
+ for (int i = 1; i <= loops_found_; i++) {
+ int index = ni.node->id() * width_ + INDEX(i);
+ bool marked_forward = forward_[index] & BIT(i);
+ bool marked_backward = backward_[index] & BIT(i);
+ if (marked_forward && marked_backward) {
PrintF("X");
- } else if (ni.forward & (1 << i)) {
+ } else if (marked_forward) {
PrintF("/");
- } else if (ni.backward & (1 << i)) {
+ } else if (marked_backward) {
PrintF("\\");
} else {
PrintF(" ");
@@ -132,157 +107,246 @@ class LoopFinderImpl {
}
private:
+ Zone* zone_;
Node* end_;
NodeDeque queue_;
NodeMarker<bool> queued_;
ZoneVector<NodeInfo> info_;
ZoneVector<LoopInfo> loops_;
+ ZoneVector<int> loop_num_;
LoopTree* loop_tree_;
- size_t loops_found_;
+ int loops_found_;
+ int width_;
+ uint32_t* backward_;
+ uint32_t* forward_;
+
+ int num_nodes() {
+ return static_cast<int>(loop_tree_->node_to_loop_num_.size());
+ }
+
+ // Tb = Tb | (Fb - loop_filter)
+ bool PropagateBackwardMarks(Node* from, Node* to, int loop_filter) {
+ if (from == to) return false;
+ uint32_t* fp = &backward_[from->id() * width_];
+ uint32_t* tp = &backward_[to->id() * width_];
+ bool change = false;
+ for (int i = 0; i < width_; i++) {
+ uint32_t mask = i == INDEX(loop_filter) ? ~BIT(loop_filter) : 0xFFFFFFFF;
+ uint32_t prev = tp[i];
+ uint32_t next = prev | (fp[i] & mask);
+ tp[i] = next;
+ if (!change && (prev != next)) change = true;
+ }
+ return change;
+ }
+
+ // Tb = Tb | B
+ bool SetBackwardMark(Node* to, int loop_num) {
+ uint32_t* tp = &backward_[to->id() * width_ + INDEX(loop_num)];
+ uint32_t prev = tp[0];
+ uint32_t next = prev | BIT(loop_num);
+ tp[0] = next;
+ return next != prev;
+ }
+
+ // Tf = Tf | B
+ bool SetForwardMark(Node* to, int loop_num) {
+ uint32_t* tp = &forward_[to->id() * width_ + INDEX(loop_num)];
+ uint32_t prev = tp[0];
+ uint32_t next = prev | BIT(loop_num);
+ tp[0] = next;
+ return next != prev;
+ }
+
+ // Tf = Tf | (Ff & Tb)
+ bool PropagateForwardMarks(Node* from, Node* to) {
+ if (from == to) return false;
+ bool change = false;
+ int findex = from->id() * width_;
+ int tindex = to->id() * width_;
+ for (int i = 0; i < width_; i++) {
+ uint32_t marks = backward_[tindex + i] & forward_[findex + i];
+ uint32_t prev = forward_[tindex + i];
+ uint32_t next = prev | marks;
+ forward_[tindex + i] = next;
+ if (!change && (prev != next)) change = true;
+ }
+ return change;
+ }
+
+ bool IsInLoop(Node* node, int loop_num) {
+ int offset = node->id() * width_ + INDEX(loop_num);
+ return backward_[offset] & forward_[offset] & BIT(loop_num);
+ }
// Propagate marks backward from loop headers.
void PropagateBackward() {
- PropagateBackward(end_, kVisited);
+ ResizeBackwardMarks();
+ SetBackwardMark(end_, 0);
+ Queue(end_);
while (!queue_.empty()) {
Node* node = queue_.front();
+ info(node);
queue_.pop_front();
queued_.Set(node, false);
+ int loop_num = -1;
// Setup loop headers first.
if (node->opcode() == IrOpcode::kLoop) {
// found the loop node first.
- CreateLoopInfo(node);
- } else if (node->opcode() == IrOpcode::kPhi ||
- node->opcode() == IrOpcode::kEffectPhi) {
+ loop_num = CreateLoopInfo(node);
+ } else if (NodeProperties::IsPhi(node)) {
// found a phi first.
Node* merge = node->InputAt(node->InputCount() - 1);
- if (merge->opcode() == IrOpcode::kLoop) CreateLoopInfo(merge);
+ if (merge->opcode() == IrOpcode::kLoop) {
+ loop_num = CreateLoopInfo(merge);
+ }
}
- // Propagate reachability marks backwards from this node.
- NodeInfo& ni = info(node);
- if (ni.IsLoopHeader()) {
- // Handle edges from loop header nodes specially.
- for (int i = 0; i < node->InputCount(); i++) {
- if (i == kAssumedLoopEntryIndex) {
- // Don't propagate the loop mark backwards on the entry edge.
- PropagateBackward(node->InputAt(0),
- kVisited | (ni.backward & ~ni.loop_mark));
- } else {
- // Only propagate the loop mark on backedges.
- PropagateBackward(node->InputAt(i), ni.loop_mark);
- }
- }
- } else {
- // Propagate all loop marks backwards for a normal node.
- for (Node* const input : node->inputs()) {
- PropagateBackward(input, ni.backward);
+ // Propagate marks backwards from this node.
+ for (int i = 0; i < node->InputCount(); i++) {
+ Node* input = node->InputAt(i);
+ if (loop_num > 0 && i != kAssumedLoopEntryIndex) {
+ // Only propagate the loop mark on backedges.
+ if (SetBackwardMark(input, loop_num)) Queue(input);
+ } else {
+ // Entry or normal edge. Propagate all marks except loop_num.
+ if (PropagateBackwardMarks(node, input, loop_num)) Queue(input);
}
}
}
}
- // Make a new loop header for the given node.
- void CreateLoopInfo(Node* node) {
- NodeInfo& ni = info(node);
- if (ni.IsLoopHeader()) return; // loop already set up.
+ // Make a new loop if necessary for the given node.
+ int CreateLoopInfo(Node* node) {
+ int loop_num = LoopNum(node);
+ if (loop_num > 0) return loop_num;
+
+ loop_num = ++loops_found_;
+ if (INDEX(loop_num) >= width_) ResizeBackwardMarks();
- loops_found_++;
- size_t loop_num = loops_.size() + 1;
- CHECK(loops_found_ <= kMaxLoops); // TODO(titzer): don't crash.
// Create a new loop.
loops_.push_back({node, nullptr, nullptr, nullptr});
loop_tree_->NewLoop();
- LoopMarks loop_mark = kVisited | (1 << loop_num);
- ni.node = node;
- ni.loop_mark = loop_mark;
+ SetBackwardMark(node, loop_num);
+ loop_tree_->node_to_loop_num_[node->id()] = loop_num;
// Setup loop mark for phis attached to loop header.
for (Node* use : node->uses()) {
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
- info(use).loop_mark = loop_mark;
+ if (NodeProperties::IsPhi(use)) {
+ info(use); // create the NodeInfo
+ SetBackwardMark(use, loop_num);
+ loop_tree_->node_to_loop_num_[use->id()] = loop_num;
+ }
+ }
+
+ return loop_num;
+ }
+
+ void ResizeBackwardMarks() {
+ int new_width = width_ + 1;
+ int max = num_nodes();
+ uint32_t* new_backward = zone_->NewArray<uint32_t>(new_width * max);
+ memset(new_backward, 0, new_width * max * sizeof(uint32_t));
+ if (width_ > 0) { // copy old matrix data.
+ for (int i = 0; i < max; i++) {
+ uint32_t* np = &new_backward[i * new_width];
+ uint32_t* op = &backward_[i * width_];
+ for (int j = 0; j < width_; j++) np[j] = op[j];
}
}
+ width_ = new_width;
+ backward_ = new_backward;
+ }
+
+ void ResizeForwardMarks() {
+ int max = num_nodes();
+ forward_ = zone_->NewArray<uint32_t>(width_ * max);
+ memset(forward_, 0, width_ * max * sizeof(uint32_t));
}
// Propagate marks forward from loops.
void PropagateForward() {
+ ResizeForwardMarks();
for (LoopInfo& li : loops_) {
- queued_.Set(li.header, true);
- queue_.push_back(li.header);
- NodeInfo& ni = info(li.header);
- ni.forward = ni.loop_mark;
+ SetForwardMark(li.header, LoopNum(li.header));
+ Queue(li.header);
}
// Propagate forward on paths that were backward reachable from backedges.
while (!queue_.empty()) {
Node* node = queue_.front();
queue_.pop_front();
queued_.Set(node, false);
- NodeInfo& ni = info(node);
for (Edge edge : node->use_edges()) {
Node* use = edge.from();
- NodeInfo& ui = info(use);
- if (IsBackedge(use, ui, edge)) continue; // skip backedges.
- LoopMarks both = ni.forward & ui.backward;
- if (ui.MarkForward(both) && !queued_.Get(use)) {
- queued_.Set(use, true);
- queue_.push_back(use);
+ if (!IsBackedge(use, edge)) {
+ if (PropagateForwardMarks(node, use)) Queue(use);
}
}
}
}
- bool IsBackedge(Node* use, NodeInfo& ui, Edge& edge) {
- // TODO(titzer): checking for backedges here is ugly.
- if (!ui.IsLoopHeader()) return false;
+ bool IsBackedge(Node* use, Edge& edge) {
+ if (LoopNum(use) <= 0) return false;
if (edge.index() == kAssumedLoopEntryIndex) return false;
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
+ if (NodeProperties::IsPhi(use)) {
return !NodeProperties::IsControlEdge(edge);
}
return true;
}
+ int LoopNum(Node* node) { return loop_tree_->node_to_loop_num_[node->id()]; }
+
NodeInfo& info(Node* node) {
NodeInfo& i = info_[node->id()];
if (i.node == nullptr) i.node = node;
return i;
}
- void PropagateBackward(Node* node, LoopMarks marks) {
- if (info(node).MarkBackward(marks) && !queued_.Get(node)) {
+ void Queue(Node* node) {
+ if (!queued_.Get(node)) {
queue_.push_back(node);
queued_.Set(node, true);
}
}
void FinishLoopTree() {
+ DCHECK(loops_found_ == static_cast<int>(loops_.size()));
+ DCHECK(loops_found_ == static_cast<int>(loop_tree_->all_loops_.size()));
+
// Degenerate cases.
- if (loops_.size() == 0) return;
- if (loops_.size() == 1) return FinishSingleLoop();
+ if (loops_found_ == 0) return;
+ if (loops_found_ == 1) return FinishSingleLoop();
- for (size_t i = 1; i <= loops_.size(); i++) ConnectLoopTree(i);
+ for (int i = 1; i <= loops_found_; i++) ConnectLoopTree(i);
size_t count = 0;
// Place the node into the innermost nested loop of which it is a member.
for (NodeInfo& ni : info_) {
- if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
+ if (ni.node == nullptr) continue;
LoopInfo* innermost = nullptr;
- size_t index = 0;
- for (size_t i = 1; i <= loops_.size(); i++) {
- if (ni.IsInLoop(i)) {
- LoopInfo* loop = &loops_[i - 1];
- if (innermost == nullptr ||
- loop->loop->depth_ > innermost->loop->depth_) {
- innermost = loop;
- index = i;
+ int innermost_index = 0;
+ int pos = ni.node->id() * width_;
+ // Search the marks word by word.
+ for (int i = 0; i < width_; i++) {
+ uint32_t marks = backward_[pos + i] & forward_[pos + i];
+ for (int j = 0; j < 32; j++) {
+ if (marks & (1u << j)) {
+ int loop_num = i * 32 + j;
+ if (loop_num == 0) continue;
+ LoopInfo* loop = &loops_[loop_num - 1];
+ if (innermost == nullptr ||
+ loop->loop->depth_ > innermost->loop->depth_) {
+ innermost = loop;
+ innermost_index = loop_num;
+ }
}
}
}
- if (ni.IsInHeaderForLoop(index)) {
+ if (innermost == nullptr) continue;
+ if (LoopNum(ni.node) == innermost_index) {
ni.next = innermost->header_list;
innermost->header_list = &ni;
} else {
@@ -301,18 +365,14 @@ class LoopFinderImpl {
// Handle the simpler case of a single loop (no checks for nesting necessary).
void FinishSingleLoop() {
- DCHECK(loops_.size() == 1);
- DCHECK(loop_tree_->all_loops_.size() == 1);
-
// Place nodes into the loop header and body.
LoopInfo* li = &loops_[0];
li->loop = &loop_tree_->all_loops_[0];
loop_tree_->SetParent(nullptr, li->loop);
size_t count = 0;
for (NodeInfo& ni : info_) {
- if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
- DCHECK(ni.IsInLoop(1));
- if (ni.IsInHeaderForLoop(1)) {
+ if (ni.node == nullptr || !IsInLoop(ni.node, 1)) continue;
+ if (LoopNum(ni.node) == 1) {
ni.next = li->header_list;
li->header_list = &ni;
} else {
@@ -330,25 +390,21 @@ class LoopFinderImpl {
// Recursively serialize the list of header nodes and body nodes
// so that nested loops occupy nested intervals.
void SerializeLoop(LoopTree::Loop* loop) {
- size_t loop_num = loop_tree_->LoopNum(loop);
+ int loop_num = loop_tree_->LoopNum(loop);
LoopInfo& li = loops_[loop_num - 1];
// Serialize the header.
loop->header_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
for (NodeInfo* ni = li.header_list; ni != nullptr; ni = ni->next) {
loop_tree_->loop_nodes_.push_back(ni->node);
- // TODO(titzer): lift loop count restriction.
- loop_tree_->node_to_loop_num_[ni->node->id()] =
- static_cast<uint8_t>(loop_num);
+ loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
}
// Serialize the body.
loop->body_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
for (NodeInfo* ni = li.body_list; ni != nullptr; ni = ni->next) {
loop_tree_->loop_nodes_.push_back(ni->node);
- // TODO(titzer): lift loop count restriction.
- loop_tree_->node_to_loop_num_[ni->node->id()] =
- static_cast<uint8_t>(loop_num);
+ loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
}
// Serialize nested loops.
@@ -358,15 +414,15 @@ class LoopFinderImpl {
}
// Connect the LoopTree loops to their parents recursively.
- LoopTree::Loop* ConnectLoopTree(size_t loop_num) {
+ LoopTree::Loop* ConnectLoopTree(int loop_num) {
LoopInfo& li = loops_[loop_num - 1];
if (li.loop != nullptr) return li.loop;
NodeInfo& ni = info(li.header);
LoopTree::Loop* parent = nullptr;
- for (size_t i = 1; i <= loops_.size(); i++) {
+ for (int i = 1; i <= loops_found_; i++) {
if (i == loop_num) continue;
- if (ni.IsInLoop(i)) {
+ if (IsInLoop(ni.node, i)) {
// recursively create potential parent loops first.
LoopTree::Loop* upper = ConnectLoopTree(i);
if (parent == nullptr || upper->depth_ > parent->depth_) {
@@ -406,6 +462,16 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
return loop_tree;
}
+
+Node* LoopTree::HeaderNode(Loop* loop) {
+ Node* first = *HeaderNodes(loop).begin();
+ if (first->opcode() == IrOpcode::kLoop) return first;
+ DCHECK(IrOpcode::IsPhiOpcode(first->opcode()));
+ Node* header = NodeProperties::GetControlInput(first);
+ DCHECK_EQ(IrOpcode::kLoop, header->opcode());
+ return header;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 8c8d19ac69..71f946113d 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -25,7 +25,7 @@ class LoopTree : public ZoneObject {
: zone_(zone),
outer_loops_(zone),
all_loops_(zone),
- node_to_loop_num_(static_cast<int>(num_nodes), 0, zone),
+ node_to_loop_num_(static_cast<int>(num_nodes), -1, zone),
loop_nodes_(zone) {}
// Represents a loop in the tree of loops, including the header nodes,
@@ -37,6 +37,7 @@ class LoopTree : public ZoneObject {
size_t HeaderSize() const { return body_start_ - header_start_; }
size_t BodySize() const { return body_end_ - body_start_; }
size_t TotalSize() const { return body_end_ - header_start_; }
+ size_t depth() const { return static_cast<size_t>(depth_); }
private:
friend class LoopTree;
@@ -61,7 +62,7 @@ class LoopTree : public ZoneObject {
Loop* ContainingLoop(Node* node) {
if (node->id() >= static_cast<int>(node_to_loop_num_.size()))
return nullptr;
- uint8_t num = node_to_loop_num_[node->id()];
+ int num = node_to_loop_num_[node->id()];
return num > 0 ? &all_loops_[num - 1] : nullptr;
}
@@ -88,12 +89,31 @@ class LoopTree : public ZoneObject {
&loop_nodes_[0] + loop->body_start_);
}
+ // Return the header control node for a loop.
+ Node* HeaderNode(Loop* loop);
+
// Return a range which can iterate over the body nodes of {loop}.
NodeRange BodyNodes(Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->body_start_,
&loop_nodes_[0] + loop->body_end_);
}
+ // Return a range which can iterate over the nodes of {loop}.
+ NodeRange LoopNodes(Loop* loop) {
+ return NodeRange(&loop_nodes_[0] + loop->header_start_,
+ &loop_nodes_[0] + loop->body_end_);
+ }
+
+ // Return the node that represents the control, i.e. the loop node itself.
+ Node* GetLoopControl(Loop* loop) {
+ // TODO(turbofan): make the loop control node always first?
+ for (Node* node : HeaderNodes(loop)) {
+ if (node->opcode() == IrOpcode::kLoop) return node;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
private:
friend class LoopFinderImpl;
@@ -116,8 +136,7 @@ class LoopTree : public ZoneObject {
Zone* zone_;
ZoneVector<Loop*> outer_loops_;
ZoneVector<Loop> all_loops_;
- // TODO(titzer): lift loop count restriction.
- ZoneVector<uint8_t> node_to_loop_num_;
+ ZoneVector<int> node_to_loop_num_;
ZoneVector<Node*> loop_nodes_;
};
@@ -128,6 +147,7 @@ class LoopFinder {
static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
new file mode 100644
index 0000000000..39f487f854
--- /dev/null
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -0,0 +1,339 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/loop-peeling.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
+#include "src/zone.h"
+
+// Loop peeling is an optimization that copies the body of a loop, creating
+// a new copy of the body called the "peeled iteration" that represents the
+// first iteration. Beginning with a loop as follows:
+
+// E
+// | A
+// | | (backedges)
+// | +---------------|---------------------------------+
+// | | +-------------|-------------------------------+ |
+// | | | | +--------+ | |
+// | | | | | +----+ | | |
+// | | | | | | | | | |
+// ( Loop )<-------- ( phiA ) | | | |
+// | | | | | |
+// ((======P=================U=======|=|=====)) | |
+// (( | | )) | |
+// (( X <---------------------+ | )) | |
+// (( | )) | |
+// (( body | )) | |
+// (( | )) | |
+// (( Y <-----------------------+ )) | |
+// (( )) | |
+// ((===K====L====M==========================)) | |
+// | | | | |
+// | | +-----------------------------------------+ |
+// | +------------------------------------------------+
+// |
+// exit
+
+// The body of the loop is duplicated so that all nodes considered "inside"
+// the loop (e.g. {P, U, X, Y, K, L, M}) have a corresponding copies in the
+// peeled iteration (e.g. {P', U', X', Y', K', L', M'}). What were considered
+// backedges of the loop correspond to edges from the peeled iteration to
+// the main loop body, with multiple backedges requiring a merge.
+
+// Similarly, any exits from the loop body need to be merged with "exits"
+// from the peeled iteration, resulting in the graph as follows:
+
+// E
+// | A
+// | |
+// ((=====P'================U'===============))
+// (( ))
+// (( X'<-------------+ ))
+// (( | ))
+// (( peeled iteration | ))
+// (( | ))
+// (( Y'<-----------+ | ))
+// (( | | ))
+// ((===K'===L'====M'======|=|===============))
+// | | | | |
+// +--------+ +-+ +-+ | |
+// | | | | |
+// | Merge <------phi
+// | | |
+// | +-----+ |
+// | | | (backedges)
+// | | +---------------|---------------------------------+
+// | | | +-------------|-------------------------------+ |
+// | | | | | +--------+ | |
+// | | | | | | +----+ | | |
+// | | | | | | | | | | |
+// | ( Loop )<-------- ( phiA ) | | | |
+// | | | | | | |
+// | ((======P=================U=======|=|=====)) | |
+// | (( | | )) | |
+// | (( X <---------------------+ | )) | |
+// | (( | )) | |
+// | (( body | )) | |
+// | (( | )) | |
+// | (( Y <-----------------------+ )) | |
+// | (( )) | |
+// | ((===K====L====M==========================)) | |
+// | | | | | |
+// | | | +-----------------------------------------+ |
+// | | +------------------------------------------------+
+// | |
+// | |
+// +----+ +-+
+// | |
+// Merge
+// |
+// exit
+
+// Note that the boxes ((===)) above are not explicitly represented in the
+// graph, but are instead computed by the {LoopFinder}.
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct Peeling {
+ // Maps a node to its index in the {pairs} vector.
+ NodeMarker<size_t> node_map;
+ // The vector which contains the mapped nodes.
+ NodeVector* pairs;
+
+ Peeling(Graph* graph, Zone* tmp_zone, size_t max, NodeVector* p)
+ : node_map(graph, static_cast<uint32_t>(max)), pairs(p) {}
+
+ Node* map(Node* node) {
+ if (node_map.Get(node) == 0) return node;
+ return pairs->at(node_map.Get(node));
+ }
+
+ void Insert(Node* original, Node* copy) {
+ node_map.Set(original, 1 + pairs->size());
+ pairs->push_back(original);
+ pairs->push_back(copy);
+ }
+
+ void CopyNodes(Graph* graph, Zone* tmp_zone, Node* dead, NodeRange nodes) {
+ NodeVector inputs(tmp_zone);
+ // Copy all the nodes first.
+ for (Node* node : nodes) {
+ inputs.clear();
+ for (Node* input : node->inputs()) inputs.push_back(map(input));
+ Insert(node, graph->NewNode(node->op(), node->InputCount(), &inputs[0]));
+ }
+
+ // Fix remaining inputs of the copies.
+ for (Node* original : nodes) {
+ Node* copy = pairs->at(node_map.Get(original));
+ for (int i = 0; i < copy->InputCount(); i++) {
+ copy->ReplaceInput(i, map(original->InputAt(i)));
+ }
+ }
+ }
+
+ bool Marked(Node* node) { return node_map.Get(node) > 0; }
+};
+
+
+class PeeledIterationImpl : public PeeledIteration {
+ public:
+ NodeVector node_pairs_;
+ explicit PeeledIterationImpl(Zone* zone) : node_pairs_(zone) {}
+};
+
+
+Node* PeeledIteration::map(Node* node) {
+ // TODO(turbofan): we use a simple linear search, since the peeled iteration
+ // is really only used in testing.
+ PeeledIterationImpl* impl = static_cast<PeeledIterationImpl*>(this);
+ for (size_t i = 0; i < impl->node_pairs_.size(); i += 2) {
+ if (impl->node_pairs_[i] == node) return impl->node_pairs_[i + 1];
+ }
+ return node;
+}
+
+
+PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
+ LoopTree* loop_tree, LoopTree::Loop* loop,
+ Zone* tmp_zone) {
+ PeeledIterationImpl* iter = new (tmp_zone) PeeledIterationImpl(tmp_zone);
+ Peeling peeling(graph, tmp_zone, loop->TotalSize() * 2 + 2,
+ &iter->node_pairs_);
+
+ //============================================================================
+ // Construct the peeled iteration.
+ //============================================================================
+ Node* dead = graph->NewNode(common->Dead());
+
+ // Map the loop header nodes to their entry values.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ // TODO(titzer): assuming loop entry at index 0.
+ peeling.Insert(node, node->InputAt(0));
+ }
+
+ // Copy all the nodes of loop body for the peeled iteration.
+ peeling.CopyNodes(graph, tmp_zone, dead, loop_tree->BodyNodes(loop));
+
+ //============================================================================
+ // Replace the entry to the loop with the output of the peeled iteration.
+ //============================================================================
+ Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* new_entry;
+ int backedges = loop_node->InputCount() - 1;
+ if (backedges > 1) {
+ // Multiple backedges from original loop, therefore multiple output edges
+ // from the peeled iteration.
+ NodeVector inputs(tmp_zone);
+ for (int i = 1; i < loop_node->InputCount(); i++) {
+ inputs.push_back(peeling.map(loop_node->InputAt(i)));
+ }
+ Node* merge =
+ graph->NewNode(common->Merge(backedges), backedges, &inputs[0]);
+
+ // Merge values from the multiple output edges of the peeled iteration.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ if (node->opcode() == IrOpcode::kLoop) continue; // already done.
+ inputs.clear();
+ for (int i = 0; i < backedges; i++) {
+ inputs.push_back(peeling.map(node->InputAt(1 + i)));
+ }
+ for (Node* input : inputs) {
+ if (input != inputs[0]) { // Non-redundant phi.
+ inputs.push_back(merge);
+ const Operator* op = common->ResizeMergeOrPhi(node->op(), backedges);
+ Node* phi = graph->NewNode(op, backedges + 1, &inputs[0]);
+ node->ReplaceInput(0, phi);
+ break;
+ }
+ }
+ }
+ new_entry = merge;
+ } else {
+ // Only one backedge, simply replace the input to loop with output of
+ // peeling.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ node->ReplaceInput(0, peeling.map(node->InputAt(0)));
+ }
+ new_entry = peeling.map(loop_node->InputAt(1));
+ }
+ loop_node->ReplaceInput(0, new_entry);
+
+ //============================================================================
+ // Find the loop exit region.
+ //============================================================================
+ NodeVector exits(tmp_zone);
+ Node* end = NULL;
+ for (Node* node : loop_tree->LoopNodes(loop)) {
+ for (Node* use : node->uses()) {
+ if (!loop_tree->Contains(loop, use)) {
+ if (node->opcode() == IrOpcode::kBranch &&
+ (use->opcode() == IrOpcode::kIfTrue ||
+ use->opcode() == IrOpcode::kIfFalse)) {
+ // This is a branch from inside the loop to outside the loop.
+ exits.push_back(use);
+ }
+ }
+ }
+ }
+
+ if (exits.size() == 0) return iter; // no exits => NTL
+
+ if (exits.size() == 1) {
+ // Only one exit, so {end} is that exit.
+ end = exits[0];
+ } else {
+ // {end} should be the common merge from the exits.
+ NodeVector rets(tmp_zone);
+ for (Node* exit : exits) {
+ Node* found = NULL;
+ for (Node* use : exit->uses()) {
+ if (use->opcode() == IrOpcode::kMerge) {
+ found = use;
+ if (end == NULL) {
+ end = found;
+ } else {
+ CHECK_EQ(end, found); // it should be unique!
+ }
+ } else if (use->opcode() == IrOpcode::kReturn) {
+ found = use;
+ rets.push_back(found);
+ }
+ }
+ // There should be a merge or a return for each exit.
+ CHECK(found);
+ }
+ // Return nodes, the end merge, and the phis associated with the end merge
+ // must be duplicated as well.
+ for (Node* node : rets) exits.push_back(node);
+ if (end != NULL) {
+ exits.push_back(end);
+ for (Node* use : end->uses()) {
+ if (NodeProperties::IsPhi(use)) exits.push_back(use);
+ }
+ }
+ }
+
+ //============================================================================
+ // Duplicate the loop exit region and add a merge.
+ //============================================================================
+ NodeRange exit_range(&exits[0], &exits[0] + exits.size());
+ peeling.CopyNodes(graph, tmp_zone, dead, exit_range);
+
+ Node* merge = graph->NewNode(common->Merge(2), end, peeling.map(end));
+ end->ReplaceUses(merge);
+ merge->ReplaceInput(0, end); // HULK SMASH!!
+
+ // Find and update all the edges into either the loop or exit region.
+ for (int i = 0; i < 2; i++) {
+ NodeRange range = i == 0 ? loop_tree->LoopNodes(loop) : exit_range;
+ ZoneVector<Edge> value_edges(tmp_zone);
+ ZoneVector<Edge> effect_edges(tmp_zone);
+
+ for (Node* node : range) {
+ // Gather value and effect edges from outside the region.
+ for (Edge edge : node->use_edges()) {
+ if (!peeling.Marked(edge.from())) {
+ // Edge from outside the loop into the region.
+ if (NodeProperties::IsValueEdge(edge) ||
+ NodeProperties::IsContextEdge(edge)) {
+ value_edges.push_back(edge);
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ effect_edges.push_back(edge);
+ } else {
+ // don't do anything for control edges.
+ // TODO(titzer): should update control edges to peeled?
+ }
+ }
+ }
+
+ // Update all the value and effect edges at once.
+ if (!value_edges.empty()) {
+ // TODO(titzer): machine type is wrong here.
+ Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), node,
+ peeling.map(node), merge);
+ for (Edge edge : value_edges) edge.UpdateTo(phi);
+ value_edges.clear();
+ }
+ if (!effect_edges.empty()) {
+ Node* effect_phi = graph->NewNode(common->EffectPhi(2), node,
+ peeling.map(node), merge);
+ for (Edge edge : effect_edges) edge.UpdateTo(effect_phi);
+ effect_edges.clear();
+ }
+ }
+ }
+
+ return iter;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
new file mode 100644
index 0000000000..3cbca782a7
--- /dev/null
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -0,0 +1,42 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOOP_PEELING_H_
+#define V8_COMPILER_LOOP_PEELING_H_
+
+#include "src/compiler/loop-analysis.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Represents the output of peeling a loop, which is basically the mapping
+// from the body of the loop to the corresponding nodes in the peeled
+// iteration.
+class PeeledIteration : public ZoneObject {
+ public:
+ // Maps {node} to its corresponding copy in the peeled iteration, if
+ // the node was part of the body of the loop. Returns {node} otherwise.
+ Node* map(Node* node);
+
+ protected:
+ PeeledIteration() {}
+};
+
+class CommonOperatorBuilder;
+
+// Implements loop peeling.
+class LoopPeeler {
+ public:
+ static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
+ LoopTree* loop_tree, LoopTree::Loop* loop,
+ Zone* tmp_zone);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_LOOP_PEELING_H_
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index c3e45a1b25..8f91d49f81 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -75,7 +75,9 @@ Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
Node* MachineOperatorReducer::Int32Sub(Node* lhs, Node* rhs) {
- return graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
+ Node* const node = graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction.replacement() : node;
}
@@ -101,13 +103,19 @@ Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
- DCHECK_LT(0, divisor);
+ DCHECK_LT(0u, divisor);
+ // If the divisor is even, we can avoid using the expensive fixup by shifting
+ // the dividend upfront.
+ unsigned const shift = base::bits::CountTrailingZeros32(divisor);
+ dividend = Word32Shr(dividend, shift);
+ divisor >>= shift;
+ // Compute the magic number for the (shifted) divisor.
base::MagicNumbersForDivision<uint32_t> const mag =
- base::UnsignedDivisionByConstant(bit_cast<uint32_t>(divisor));
+ base::UnsignedDivisionByConstant(divisor, shift);
Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend,
Uint32Constant(mag.multiplier));
if (mag.add) {
- DCHECK_LE(1, mag.shift);
+ DCHECK_LE(1u, mag.shift);
quotient = Word32Shr(
Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient),
mag.shift - 1);
@@ -122,7 +130,7 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
Reduction MachineOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kProjection:
- return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
+ return ReduceProjection(ProjectionIndexOf(node->op()), node->InputAt(0));
case IrOpcode::kWord32And:
return ReduceWord32And(node);
case IrOpcode::kWord32Or:
@@ -216,16 +224,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kInt32Add:
return ReduceInt32Add(node);
- case IrOpcode::kInt32Sub: {
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
- if (m.IsFoldable()) { // K - K => K
- return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
- static_cast<uint32_t>(m.right().Value()));
- }
- if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
- break;
- }
+ case IrOpcode::kInt32Sub:
+ return ReduceInt32Sub(node);
case IrOpcode::kInt32Mul: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
@@ -390,7 +390,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Mod: {
Float64BinopMatcher m(node);
if (m.right().Is(0)) { // x % 0 => NaN
- return ReplaceFloat64(base::OS::nan_value());
+ return ReplaceFloat64(std::numeric_limits<double>::quiet_NaN());
}
if (m.right().IsNaN()) { // x % NaN => NaN
return Replace(m.right().node());
@@ -475,6 +475,25 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
}
+Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
+ DCHECK_EQ(IrOpcode::kInt32Sub, node->opcode());
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
+ if (m.IsFoldable()) { // K - K => K
+ return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
+ static_cast<uint32_t>(m.right().Value()));
+ }
+ if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
+ if (m.right().HasValue()) { // x - K => x + -K
+ node->set_op(machine()->Int32Add());
+ node->ReplaceInput(1, Int32Constant(-m.right().Value()));
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ return NoChange();
+}
+
+
Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
Int32BinopMatcher m(node);
if (m.left().Is(0)) return Replace(m.left().node()); // 0 / x => 0
@@ -501,7 +520,7 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
Node* quotient = dividend;
if (base::bits::IsPowerOfTwo32(Abs(divisor))) {
uint32_t const shift = WhichPowerOf2Abs(divisor);
- DCHECK_NE(0, shift);
+ DCHECK_NE(0u, shift);
if (shift > 1) {
quotient = Word32Sar(quotient, 31);
}
@@ -784,63 +803,73 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
}
- if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() &&
- (mleft.right().Value() & m.right().Value()) == mleft.right().Value()) {
- // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
- node->ReplaceInput(1, mleft.right().node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
- }
- if (mleft.left().IsInt32Mul()) {
- Int32BinopMatcher mleftleft(mleft.left().node());
- if (mleftleft.right().IsMultipleOf(-m.right().Value())) {
- // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0,
- Word32And(mleft.right().node(), m.right().node()));
- node->ReplaceInput(1, mleftleft.node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
- }
- }
- if (mleft.right().IsInt32Mul()) {
- Int32BinopMatcher mleftright(mleft.right().node());
- if (mleftright.right().IsMultipleOf(-m.right().Value())) {
- // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+ if (m.right().IsNegativePowerOf2()) {
+ int32_t const mask = m.right().Value();
+ if (m.left().IsWord32Shl()) {
+ Uint32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() &&
+ mleft.right().Value() >= base::bits::CountTrailingZeros32(mask)) {
+ // (x << L) & (-1 << K) => x << L iff K >= L
+ return Replace(mleft.node());
+ }
+ } else if (m.left().IsInt32Add()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() &&
+ (mleft.right().Value() & mask) == mleft.right().Value()) {
+ // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
node->set_op(machine()->Int32Add());
node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
- node->ReplaceInput(1, mleftright.node());
+ node->ReplaceInput(1, mleft.right().node());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
- }
- if (mleft.left().IsWord32Shl()) {
- Int32BinopMatcher mleftleft(mleft.left().node());
- if (mleftleft.right().Is(
- base::bits::CountTrailingZeros32(m.right().Value()))) {
- // (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0,
- Word32And(mleft.right().node(), m.right().node()));
- node->ReplaceInput(1, mleftleft.node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
+ if (mleft.left().IsInt32Mul()) {
+ Int32BinopMatcher mleftleft(mleft.left().node());
+ if (mleftleft.right().IsMultipleOf(-mask)) {
+ // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+ node->set_op(machine()->Int32Add());
+ node->ReplaceInput(0,
+ Word32And(mleft.right().node(), m.right().node()));
+ node->ReplaceInput(1, mleftleft.node());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
}
- }
- if (mleft.right().IsWord32Shl()) {
- Int32BinopMatcher mleftright(mleft.right().node());
- if (mleftright.right().Is(
- base::bits::CountTrailingZeros32(m.right().Value()))) {
- // (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
- node->ReplaceInput(1, mleftright.node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
+ if (mleft.right().IsInt32Mul()) {
+ Int32BinopMatcher mleftright(mleft.right().node());
+ if (mleftright.right().IsMultipleOf(-mask)) {
+ // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+ node->set_op(machine()->Int32Add());
+ node->ReplaceInput(0,
+ Word32And(mleft.left().node(), m.right().node()));
+ node->ReplaceInput(1, mleftright.node());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ if (mleft.left().IsWord32Shl()) {
+ Int32BinopMatcher mleftleft(mleft.left().node());
+ if (mleftleft.right().Is(base::bits::CountTrailingZeros32(mask))) {
+ // (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
+ node->set_op(machine()->Int32Add());
+ node->ReplaceInput(0,
+ Word32And(mleft.right().node(), m.right().node()));
+ node->ReplaceInput(1, mleftleft.node());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ if (mleft.right().IsWord32Shl()) {
+ Int32BinopMatcher mleftright(mleft.right().node());
+ if (mleftright.right().Is(base::bits::CountTrailingZeros32(mask))) {
+ // (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
+ node->set_op(machine()->Int32Add());
+ node->ReplaceInput(0,
+ Word32And(mleft.left().node(), m.right().node()));
+ node->ReplaceInput(1, mleftright.node());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
}
}
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 8200abbf95..9e02ffde72 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -65,6 +65,7 @@ class MachineOperatorReducer FINAL : public Reducer {
}
Reduction ReduceInt32Add(Node* node);
+ Reduction ReduceInt32Sub(Node* node);
Reduction ReduceInt32Div(Node* node);
Reduction ReduceUint32Div(Node* node);
Reduction ReduceInt32Mod(Node* node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index eb034e92e3..2522a8e15d 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -7,8 +7,6 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/v8.h"
-#include "src/zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index dd92837bbd..58c4581663 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -6,7 +6,6 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/scopes.h"
@@ -202,6 +201,100 @@ class OutOfLineCeil FINAL : public OutOfLineRound {
: OutOfLineRound(gen, result) {}
};
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return lt;
+ case kNotOverflow:
+ return ge;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ predicate = true;
+ return EQ;
+ case kNotEqual:
+ predicate = false;
+ return EQ;
+ case kUnsignedLessThan:
+ predicate = true;
+ return OLT;
+ case kUnsignedGreaterThanOrEqual:
+ predicate = false;
+ return ULT;
+ case kUnsignedLessThanOrEqual:
+ predicate = true;
+ return OLE;
+ case kUnsignedGreaterThan:
+ predicate = false;
+ return ULE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ predicate = true;
+ break;
+ default:
+ predicate = true;
+ break;
+ }
+ UNREACHABLE();
+ return kNoFPUCondition;
+}
+
} // namespace
@@ -335,6 +428,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
@@ -645,72 +744,18 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
- switch (branch->condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMipsTst, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
// kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
- switch (branch->condition) {
- case kOverflow:
- cc = lt;
- break;
- case kNotOverflow:
- cc = ge;
- break;
- default:
- UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionOvf(branch->condition);
__ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
} else if (instr->arch_opcode() == kMipsCmp) {
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmp, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
@@ -720,24 +765,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// even if we have to unfold BranchF macro.
Label* nan = flabel;
switch (branch->condition) {
- case kUnorderedEqual:
+ case kEqual:
cc = eq;
break;
- case kUnorderedNotEqual:
+ case kNotEqual:
cc = ne;
nan = tlabel;
break;
- case kUnorderedLessThan:
+ case kUnsignedLessThan:
cc = lt;
break;
- case kUnorderedGreaterThanOrEqual:
+ case kUnsignedGreaterThanOrEqual:
cc = ge;
nan = tlabel;
break;
- case kUnorderedLessThanOrEqual:
+ case kUnsignedLessThanOrEqual:
cc = le;
break;
- case kUnorderedGreaterThan:
+ case kUnsignedGreaterThan:
cc = gt;
nan = tlabel;
break;
@@ -772,7 +817,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label false_value;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
@@ -784,20 +829,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// in the false case, where we fall thru the branch, we reset the result
// false.
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
- switch (condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMipsTst, condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
@@ -805,119 +838,85 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
// kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
- switch (condition) {
- case kOverflow:
- cc = lt;
- break;
- case kNotOverflow:
- cc = ge;
- break;
- default:
- UNSUPPORTED_COND(kMipsAddOvf, condition);
- break;
- }
+ cc = FlagsConditionToConditionOvf(condition);
__ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
-
} else if (instr->arch_opcode() == kMipsCmp) {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
- switch (condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmp, condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(condition);
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMipsCmpD) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
- // TODO(plind): Provide NaN-testing macro-asm function without need for
- // BranchF.
- FPURegister dummy1 = f0;
- FPURegister dummy2 = f2;
- switch (condition) {
- case kUnorderedEqual:
- // TODO(plind): improve the NaN testing throughout this function.
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ne;
- break;
- case kUnorderedLessThan:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ge;
- break;
- case kUnorderedLessThanOrEqual:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = le;
- break;
- case kUnorderedGreaterThan:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = gt;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmp, condition);
- break;
- }
- __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
- __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
- // Fall-thru (branch not taken) returns 0.
+ bool predicate;
+ FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ __ li(result, Operand(1));
+ __ c(cc, D, left, right);
+ if (predicate) {
+ __ Movf(result, zero_reg);
+ } else {
+ __ Movt(result, zero_reg);
+ }
+ } else {
+ __ cmp(cc, L, kDoubleCompareReg, left, right);
+ __ mfc1(at, kDoubleCompareReg);
+ __ srl(result, at, 31); // Cmp returns all 1s for true.
+ if (!predicate) // Toggle result for not equal.
+ __ xori(result, result, 1);
+ }
+ return;
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
TRACE_UNIMPL();
UNIMPLEMENTED();
}
- // Fallthru case is the false materialization.
+
+ // Fallthrough case is the false materialization.
__ bind(&false_value);
__ li(result, Operand(0));
__ bind(&done);
}
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ li(at, Operand(i.InputInt32(index + 0)));
+ __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ }
+ __ nop(); // Branch delay slot of the last beq.
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label here;
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ BlockTrampolinePoolFor(case_count + 6);
+ __ bal(&here);
+ __ sll(at, input, 2); // Branch delay slot.
+ __ bind(&here);
+ __ addu(at, at, ra);
+ __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ __ jr(at);
+ __ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ __ dd(GetLabel(i.InputRpo(index + 2)));
+ }
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
@@ -927,6 +926,7 @@ void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Push(ra, fp);
__ mov(fp, sp);
@@ -946,12 +946,26 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ } else if (stack_slots > 0) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
- int stack_slots = frame()->GetSpillSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
if (stack_slots > 0) {
__ Subu(sp, sp, Operand(stack_slots * kPointerSize));
}
@@ -960,10 +974,10 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Addu(sp, sp, Operand(stack_slots * kPointerSize));
}
@@ -976,13 +990,15 @@ void CodeGenerator::AssembleReturn() {
__ mov(sp, fp);
__ Pop(ra, fp);
__ Ret();
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ mov(sp, fp);
__ Pop(ra, fp);
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ DropAndRet(pop_count);
+ } else {
+ __ Ret();
}
}
@@ -1150,6 +1166,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 32-bit MIPS we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// Unused on 32-bit ARM. Still exists on 64-bit arm.
// TODO(plind): Unclear when this is called now. Understand, fix if needed.
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 5e8e3b1d43..d723453c01 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -5,6 +5,7 @@
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -22,7 +23,7 @@ class MipsOperandGenerator FINAL : public OperandGenerator {
explicit MipsOperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
@@ -90,9 +91,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
@@ -108,8 +109,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
@@ -161,7 +162,7 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
@@ -184,8 +185,8 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMipsStoreWriteBarrier, NULL, g.UseFixed(base, t0),
+ InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+ Emit(kMipsStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
return;
}
@@ -216,15 +217,15 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
- g.TempImmediate(0), g.UseRegister(value));
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
}
}
@@ -289,7 +290,7 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -298,7 +299,7 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -465,13 +466,13 @@ void InstructionSelector::VisitCall(Node* node) {
// Possibly align stack here for functions.
int push_count = buffer.pushed_nodes.size();
if (push_count > 0) {
- Emit(kMipsStackClaim | MiscField::encode(push_count), NULL);
+ Emit(kMipsStackClaim | MiscField::encode(push_count), g.NoOutput());
}
int slot = buffer.pushed_nodes.size() - 1;
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kMipsStoreToStackSlot | MiscField::encode(slot), NULL,
- g.UseRegister(*input));
+ for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
+ ++i) {
+ Emit(kMipsStoreToStackSlot | MiscField::encode(slot), g.NoOutput(),
+ g.UseRegister(*i));
slot--;
}
@@ -492,7 +493,7 @@ void InstructionSelector::VisitCall(Node* node) {
opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
- InstructionOperand** first_output =
+ InstructionOperand* first_output =
buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), first_output,
@@ -529,15 +530,15 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
@@ -573,18 +574,19 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
- length_operand, g.UseRegister(value), g.UseRegister(buffer));
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
+
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
+
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, g.UseRegister(value),
+ g.UseRegister(buffer));
}
@@ -592,12 +594,13 @@ namespace {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
DCHECK(cont->IsSet());
@@ -680,25 +683,25 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
- Node* const result = node->FindProjection(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (!result || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -724,9 +727,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Continuation could not be combined with a compare, emit compare against 0.
MipsOperandGenerator g(selector);
InstructionCode const opcode = cont->Encode(kMipsCmp);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
@@ -743,6 +746,66 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ MipsOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+ InstructionOperand default_operand = g.Label(default_branch);
+
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+ // is 2^31-1, so don't assume that it's non-zero below.
+ size_t value_range =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+ // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+ // instruction.
+ size_t table_space_cost = 9 + value_range;
+ size_t table_time_cost = 9;
+ size_t lookup_space_cost = 2 + 2 * case_count;
+ size_t lookup_time_cost = case_count;
+ if (case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMipsSub, index_operand, value_operand, g.TempImmediate(min_value));
+ }
+ size_t input_count = 2 + value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < case_count; ++index) {
+ size_t value = case_values[index] - min_value;
+ BasicBlock* branch = case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+ return;
+ }
+
+ // Generate a sequence of conditional jumps.
+ size_t input_count = 2 + case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = default_operand;
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t value = case_values[index];
+ BasicBlock* branch = case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -778,7 +841,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMipsAddOvf, &cont);
}
@@ -788,7 +851,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMipsSubOvf, &cont);
}
@@ -798,19 +861,19 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/mips/linkage-mips.cc b/deps/v8/src/compiler/mips/linkage-mips.cc
index 2b314a2280..cbb59d3a10 100644
--- a/deps/v8/src/compiler/mips/linkage-mips.cc
+++ b/deps/v8/src/compiler/mips/linkage-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
@@ -35,30 +33,32 @@ struct MipsLinkageHelperTraits {
typedef LinkageHelper<MipsLinkageHelperTraits> LH;
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
+ const MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index dee7705f05..60e016fa22 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -6,7 +6,6 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/scopes.h"
@@ -203,6 +202,100 @@ class OutOfLineCeil FINAL : public OutOfLineRound {
};
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ predicate = true;
+ return EQ;
+ case kNotEqual:
+ predicate = false;
+ return EQ;
+ case kUnsignedLessThan:
+ predicate = true;
+ return OLT;
+ case kUnsignedGreaterThanOrEqual:
+ predicate = false;
+ return ULT;
+ case kUnsignedLessThanOrEqual:
+ predicate = true;
+ return OLE;
+ case kUnsignedGreaterThan:
+ predicate = false;
+ return ULE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ predicate = true;
+ break;
+ default:
+ predicate = true;
+ break;
+ }
+ UNREACHABLE();
+ return kNoFPUCondition;
+}
+
} // namespace
@@ -335,6 +428,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
@@ -715,31 +814,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// not separated by other instructions.
if (instr->arch_opcode() == kMips64Tst) {
- switch (branch->condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Tst32) {
- switch (branch->condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst32, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(branch->condition);
// Zero-extend registers on MIPS64 only 64-bit operand
// branch and compare op. is available.
// This is a disadvantage to perform 32-bit operation on MIPS64.
@@ -750,97 +829,19 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
- switch (branch->condition) {
- case kOverflow:
- cc = ne;
- break;
- case kNotOverflow:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Dadd, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionOvf(branch->condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64Cmp) {
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else if (instr->arch_opcode() == kMips64Cmp32) {
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(branch->condition);
switch (branch->condition) {
case kEqual:
@@ -879,24 +880,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// even if we have to unfold BranchF macro.
Label* nan = flabel;
switch (branch->condition) {
- case kUnorderedEqual:
+ case kEqual:
cc = eq;
break;
- case kUnorderedNotEqual:
+ case kNotEqual:
cc = ne;
nan = tlabel;
break;
- case kUnorderedLessThan:
+ case kUnsignedLessThan:
cc = lt;
break;
- case kUnorderedGreaterThanOrEqual:
+ case kUnsignedGreaterThanOrEqual:
cc = ge;
nan = tlabel;
break;
- case kUnorderedLessThanOrEqual:
+ case kUnsignedLessThanOrEqual:
cc = le;
break;
- case kUnorderedGreaterThan:
+ case kUnsignedGreaterThan:
cc = gt;
nan = tlabel;
break;
@@ -930,7 +931,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label false_value;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
@@ -943,32 +944,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// false.
if (instr->arch_opcode() == kMips64Tst) {
- switch (condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst, condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Tst32) {
- switch (condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst, condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(condition);
// Zero-extend register on MIPS64 only 64-bit operand
// branch and compare op. is available.
__ And(at, i.InputRegister(0), i.InputOperand(1));
@@ -977,17 +958,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
- switch (condition) {
- case kOverflow:
- cc = ne;
- break;
- case kNotOverflow:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64DAdd, condition);
- break;
- }
+ cc = FlagsConditionToConditionOvf(condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
@@ -995,81 +966,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
} else if (instr->arch_opcode() == kMips64Cmp) {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
- switch (condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(condition);
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Cmp32) {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
- switch (condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(condition);
switch (condition) {
case kEqual:
@@ -1105,47 +1008,25 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
} else if (instr->arch_opcode() == kMips64CmpD) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
- // TODO(plind): Provide NaN-testing macro-asm function without need for
- // BranchF.
- FPURegister dummy1 = f0;
- FPURegister dummy2 = f2;
- switch (condition) {
- case kUnorderedEqual:
- // TODO(plind): improve the NaN testing throughout this function.
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ne;
- break;
- case kUnorderedLessThan:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ge;
- break;
- case kUnorderedLessThanOrEqual:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = le;
- break;
- case kUnorderedGreaterThan:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = gt;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, condition);
- break;
- }
- __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
- __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
- // Fall-thru (branch not taken) returns 0.
+ bool predicate;
+ FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
+ if (kArchVariant != kMips64r6) {
+ __ li(result, Operand(1));
+ __ c(cc, D, left, right);
+ if (predicate) {
+ __ Movf(result, zero_reg);
+ } else {
+ __ Movt(result, zero_reg);
+ }
+ } else {
+ __ cmp(cc, L, kDoubleCompareReg, left, right);
+ __ dmfc1(at, kDoubleCompareReg);
+ __ dsrl32(result, at, 31); // Cmp returns all 1s for true.
+ if (!predicate) // Toggle result for not equal.
+ __ xori(result, result, 1);
+ }
+ return;
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
@@ -1159,6 +1040,43 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ li(at, Operand(i.InputInt32(index + 0)));
+ __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ }
+ __ nop(); // Branch delay slot of the last beq.
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label here;
+
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ BlockTrampolinePoolFor(case_count * 2 + 7);
+ // Ensure that dd-ed labels use 8 byte aligned addresses.
+ if ((masm()->pc_offset() & 7) != 0) {
+ __ nop();
+ }
+ __ bal(&here);
+ __ dsll(at, input, 3); // Branch delay slot.
+ __ bind(&here);
+ __ daddu(at, at, ra);
+ __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ __ jr(at);
+ __ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ __ dd(GetLabel(i.InputRpo(index + 2)));
+ }
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
@@ -1168,6 +1086,7 @@ void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Push(ra, fp);
__ mov(fp, sp);
@@ -1187,30 +1106,26 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
- Label ok;
- // +2 for return address and saved frame pointer.
- int receiver_slot = info->scope()->num_parameters() + 2;
- __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ ld(a2, GlobalObjectOperand());
- __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
- __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
- __ bind(&ok);
- }
- } else {
+ } else if (stack_slots > 0) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
- int stack_slots = frame()->GetSpillSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
if (stack_slots > 0) {
__ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
}
@@ -1219,10 +1134,10 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
}
@@ -1235,13 +1150,15 @@ void CodeGenerator::AssembleReturn() {
__ mov(sp, fp);
__ Pop(ra, fp);
__ Ret();
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ mov(sp, fp);
__ Pop(ra, fp);
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ DropAndRet(pop_count);
+ } else {
+ __ Ret();
}
}
@@ -1409,6 +1326,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit MIPS we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// Unused on 32-bit ARM. Still exists on 64-bit arm.
// TODO(plind): Unclear when this is called now. Understand, fix if needed.
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 35ad16ba23..779f786468 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -5,6 +5,7 @@
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -22,7 +23,7 @@ class Mips64OperandGenerator FINAL : public OperandGenerator {
explicit Mips64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
@@ -123,9 +124,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
@@ -141,8 +142,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
@@ -197,7 +198,7 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
@@ -220,8 +221,8 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMips64StoreWriteBarrier, NULL, g.UseFixed(base, t0),
+ InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+ Emit(kMips64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
return;
}
@@ -255,15 +256,15 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
- g.TempImmediate(0), g.UseRegister(value));
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
}
}
@@ -374,7 +375,7 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -383,7 +384,7 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -406,7 +407,7 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
void InstructionSelector::VisitUint32MulHigh(Node* node) {
Mips64OperandGenerator g(this);
- InstructionOperand* const dmul_operand = g.TempRegister();
+ InstructionOperand const dmul_operand = g.TempRegister();
Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
@@ -427,7 +428,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -436,7 +437,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -645,13 +646,13 @@ void InstructionSelector::VisitCall(Node* node) {
int push_count = buffer.pushed_nodes.size();
if (push_count > 0) {
- Emit(kMips64StackClaim | MiscField::encode(push_count), NULL);
+ Emit(kMips64StackClaim | MiscField::encode(push_count), g.NoOutput());
}
int slot = buffer.pushed_nodes.size() - 1;
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kMips64StoreToStackSlot | MiscField::encode(slot), NULL,
- g.UseRegister(*input));
+ for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
+ ++i) {
+ Emit(kMips64StoreToStackSlot | MiscField::encode(slot), g.NoOutput(),
+ g.UseRegister(*i));
slot--;
}
@@ -708,15 +709,15 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
@@ -752,18 +753,19 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
- length_operand, g.UseRegister(value), g.UseRegister(buffer));
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
+
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
+
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, g.UseRegister(value),
+ g.UseRegister(buffer));
}
@@ -771,12 +773,13 @@ namespace {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
DCHECK(cont->IsSet());
@@ -837,9 +840,9 @@ void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
Node* value, FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
@@ -909,25 +912,25 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -964,6 +967,67 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+ InstructionOperand default_operand = g.Label(default_branch);
+
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+ // is 2^31-1, so don't assume that it's non-zero below.
+ size_t value_range =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+ // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+ // instruction.
+ size_t table_space_cost = 10 + 2 * value_range;
+ size_t table_time_cost = 10;
+ size_t lookup_space_cost = 2 + 2 * case_count;
+ size_t lookup_time_cost = case_count;
+ if (case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMips64Sub, index_operand, value_operand,
+ g.TempImmediate(min_value));
+ }
+ size_t input_count = 2 + value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < case_count; ++index) {
+ size_t value = case_values[index] - min_value;
+ BasicBlock* branch = case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+ return;
+ }
+
+ // Generate a sequence of conditional jumps.
+ size_t input_count = 2 + case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = default_operand;
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t value = case_values[index];
+ BasicBlock* branch = case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -1000,7 +1064,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dadd, &cont);
}
@@ -1010,7 +1074,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dsub, &cont);
}
@@ -1049,19 +1113,19 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/mips64/linkage-mips64.cc b/deps/v8/src/compiler/mips64/linkage-mips64.cc
index 0e1a5900c1..273054e206 100644
--- a/deps/v8/src/compiler/mips64/linkage-mips64.cc
+++ b/deps/v8/src/compiler/mips64/linkage-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
@@ -35,30 +33,32 @@ struct MipsLinkageHelperTraits {
typedef LinkageHelper<MipsLinkageHelperTraits> LH;
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
+ const MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index 330f32f65d..f4e0513775 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -8,119 +8,98 @@ namespace v8 {
namespace internal {
namespace compiler {
-MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
- : local_zone_(local_zone),
- code_(code),
- temp_vector_0_(local_zone),
- temp_vector_1_(local_zone) {}
-
-
-void MoveOptimizer::Run() {
- // First smash all consecutive moves into the left most move slot.
- for (auto* block : code()->instruction_blocks()) {
- GapInstruction* prev_gap = nullptr;
- for (int index = block->code_start(); index < block->code_end(); ++index) {
- auto instr = code()->instructions()[index];
- if (!instr->IsGapMoves()) {
- if (instr->IsSourcePosition() || instr->IsNop()) continue;
- FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
- prev_gap = nullptr;
- continue;
- }
- auto gap = GapInstruction::cast(instr);
- // Find first non-empty slot.
- int i = GapInstruction::FIRST_INNER_POSITION;
- for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
- auto move = gap->parallel_moves()[i];
- if (move == nullptr) continue;
- auto move_ops = move->move_operands();
- auto op = move_ops->begin();
- for (; op != move_ops->end(); ++op) {
- if (!op->IsRedundant()) break;
- }
- if (op == move_ops->end()) {
- move_ops->Rewind(0); // Clear this redundant move.
- } else {
- break; // Found index of first non-redundant move.
- }
- }
- // Nothing to do here.
- if (i == GapInstruction::LAST_INNER_POSITION + 1) {
- if (prev_gap != nullptr) {
- // Slide prev_gap down so we always know where to look for it.
- std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
- prev_gap = gap;
- }
- continue;
- }
- // Move the first non-empty gap to position 0.
- std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
- auto left = gap->parallel_moves()[0];
- // Compress everything into position 0.
- for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
- auto move = gap->parallel_moves()[i];
- if (move == nullptr) continue;
- CompressMoves(&temp_vector_0_, left, move);
- }
- if (prev_gap != nullptr) {
- // Smash left into prev_gap, killing left.
- auto pred_moves = prev_gap->parallel_moves()[0];
- CompressMoves(&temp_vector_0_, pred_moves, left);
- std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
- }
- prev_gap = gap;
- }
- FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
- }
-}
+namespace {
-
-static MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move,
- Zone* zone) {
+MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move,
+ Zone* zone) {
auto move_ops = left->move_operands();
MoveOperands* replacement = nullptr;
MoveOperands* to_eliminate = nullptr;
for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
if (curr->IsEliminated()) continue;
if (curr->destination()->Equals(move->source())) {
- DCHECK_EQ(nullptr, replacement);
+ DCHECK(!replacement);
replacement = curr;
if (to_eliminate != nullptr) break;
} else if (curr->destination()->Equals(move->destination())) {
- DCHECK_EQ(nullptr, to_eliminate);
+ DCHECK(!to_eliminate);
to_eliminate = curr;
if (replacement != nullptr) break;
}
}
DCHECK(!(replacement == to_eliminate && replacement != nullptr));
if (replacement != nullptr) {
- auto new_source = new (zone) InstructionOperand(
- replacement->source()->kind(), replacement->source()->index());
+ auto new_source = InstructionOperand::New(
+ zone, replacement->source()->kind(), replacement->source()->index());
move->set_source(new_source);
}
return to_eliminate;
}
+bool GapsCanMoveOver(Instruction* instr) {
+ DCHECK(!instr->IsGapMoves());
+ return instr->IsSourcePosition() || instr->IsNop();
+}
+
+
+int FindFirstNonEmptySlot(GapInstruction* gap) {
+ int i = GapInstruction::FIRST_INNER_POSITION;
+ for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ auto move = gap->parallel_moves()[i];
+ if (move == nullptr) continue;
+ auto move_ops = move->move_operands();
+ auto op = move_ops->begin();
+ for (; op != move_ops->end(); ++op) {
+ if (!op->IsRedundant()) break;
+ op->Eliminate();
+ }
+ if (op != move_ops->end()) break; // Found non-redundant move.
+ move_ops->Rewind(0); // Clear this redundant move.
+ }
+ return i;
+}
+
+} // namespace
+
+
+MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
+ : local_zone_(local_zone),
+ code_(code),
+ to_finalize_(local_zone),
+ temp_vector_0_(local_zone),
+ temp_vector_1_(local_zone) {}
+
+
+void MoveOptimizer::Run() {
+ for (auto* block : code()->instruction_blocks()) {
+ CompressBlock(block);
+ }
+ for (auto gap : to_finalize_) {
+ FinalizeMoves(gap);
+ }
+}
+
+
void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
ParallelMove* right) {
DCHECK(eliminated->empty());
auto move_ops = right->move_operands();
- // Modify the right moves in place and collect moves that will be killed by
- // merging the two gaps.
- for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
- if (op->IsRedundant()) continue;
- MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
- if (to_eliminate != nullptr) {
- eliminated->push_back(to_eliminate);
+ if (!left->move_operands()->is_empty()) {
+ // Modify the right moves in place and collect moves that will be killed by
+ // merging the two gaps.
+ for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+ if (op->IsRedundant()) continue;
+ MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
+ if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
}
+ // Eliminate dead moves. Must happen before insertion of new moves as the
+ // contents of eliminated are pointers into a list.
+ for (auto to_eliminate : *eliminated) {
+ to_eliminate->Eliminate();
+ }
+ eliminated->clear();
}
- // Eliminate dead moves. Must happen before insertion of new moves as the
- // contents of eliminated are pointers into a list.
- for (auto to_eliminate : *eliminated) {
- to_eliminate->Eliminate();
- }
- eliminated->clear();
// Add all possibly modified moves from right side.
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
@@ -131,13 +110,60 @@ void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
}
-void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
- GapInstruction* gap) {
- DCHECK(loads->empty());
- DCHECK(new_moves->empty());
- if (gap == nullptr) return;
- // Split multiple loads of the same constant or stack slot off into the second
- // slot and keep remaining moves in the first slot.
+// Smash all consecutive moves into the left most move slot and accumulate them
+// as much as possible across instructions.
+void MoveOptimizer::CompressBlock(InstructionBlock* block) {
+ auto temp_vector = temp_vector_0();
+ DCHECK(temp_vector.empty());
+ GapInstruction* prev_gap = nullptr;
+ for (int index = block->code_start(); index < block->code_end(); ++index) {
+ auto instr = code()->instructions()[index];
+ if (!instr->IsGapMoves()) {
+ if (GapsCanMoveOver(instr)) continue;
+ if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
+ prev_gap = nullptr;
+ continue;
+ }
+ auto gap = GapInstruction::cast(instr);
+ int i = FindFirstNonEmptySlot(gap);
+ // Nothing to do here.
+ if (i == GapInstruction::LAST_INNER_POSITION + 1) {
+ if (prev_gap != nullptr) {
+ // Slide prev_gap down so we always know where to look for it.
+ std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+ prev_gap = gap;
+ }
+ continue;
+ }
+ // Move the first non-empty gap to position 0.
+ std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
+ auto left = gap->parallel_moves()[0];
+ // Compress everything into position 0.
+ for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
+ auto move = gap->parallel_moves()[i];
+ if (move == nullptr) continue;
+ CompressMoves(&temp_vector, left, move);
+ }
+ if (prev_gap != nullptr) {
+ // Smash left into prev_gap, killing left.
+ auto pred_moves = prev_gap->parallel_moves()[0];
+ CompressMoves(&temp_vector, pred_moves, left);
+ // Slide prev_gap down so we always know where to look for it.
+ std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+ }
+ prev_gap = gap;
+ }
+ if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
+}
+
+
+// Split multiple loads of the same constant or stack slot off into the second
+// slot and keep remaining moves in the first slot.
+void MoveOptimizer::FinalizeMoves(GapInstruction* gap) {
+ auto loads = temp_vector_0();
+ DCHECK(loads.empty());
+ auto new_moves = temp_vector_1();
+ DCHECK(new_moves.empty());
auto move_ops = gap->parallel_moves()[0]->move_operands();
for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
if (move->IsRedundant()) {
@@ -149,7 +175,7 @@ void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
continue;
// Search for existing move to this slot.
MoveOperands* found = nullptr;
- for (auto load : *loads) {
+ for (auto load : loads) {
if (load->source()->Equals(move->source())) {
found = load;
break;
@@ -157,11 +183,11 @@ void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
}
// Not found so insert.
if (found == nullptr) {
- loads->push_back(move);
+ loads.push_back(move);
// Replace source with copy for later use.
auto dest = move->destination();
- move->set_destination(new (code_zone())
- InstructionOperand(dest->kind(), dest->index()));
+ move->set_destination(
+ InstructionOperand::New(code_zone(), dest->kind(), dest->index()));
continue;
}
if ((found->destination()->IsStackSlot() ||
@@ -173,31 +199,31 @@ void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
InstructionOperand::Kind found_kind = found->destination()->kind();
int found_index = found->destination()->index();
auto next_dest =
- new (code_zone()) InstructionOperand(found_kind, found_index);
+ InstructionOperand::New(code_zone(), found_kind, found_index);
auto dest = move->destination();
found->destination()->ConvertTo(dest->kind(), dest->index());
move->set_destination(next_dest);
}
// move from load destination.
move->set_source(found->destination());
- new_moves->push_back(move);
+ new_moves.push_back(move);
}
- loads->clear();
- if (new_moves->empty()) return;
+ loads.clear();
+ if (new_moves.empty()) return;
// Insert all new moves into slot 1.
auto slot_1 = gap->GetOrCreateParallelMove(
static_cast<GapInstruction::InnerPosition>(1), code_zone());
DCHECK(slot_1->move_operands()->is_empty());
slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
- static_cast<int>(new_moves->size()),
+ static_cast<int>(new_moves.size()),
code_zone());
auto it = slot_1->move_operands()->begin();
- for (auto new_move : *new_moves) {
+ for (auto new_move : new_moves) {
std::swap(*new_move, *it);
++it;
}
DCHECK_EQ(it, slot_1->move_operands()->end());
- new_moves->clear();
+ new_moves.clear();
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index bbce6867d6..2bde09eae5 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -19,18 +19,22 @@ class MoveOptimizer FINAL {
private:
typedef ZoneVector<MoveOperands*> MoveOpVector;
+ typedef ZoneVector<GapInstruction*> GapInstructions;
InstructionSequence* code() const { return code_; }
Zone* local_zone() const { return local_zone_; }
Zone* code_zone() const { return code()->zone(); }
+ MoveOpVector& temp_vector_0() { return temp_vector_0_; }
+ MoveOpVector& temp_vector_1() { return temp_vector_1_; }
+ void CompressBlock(InstructionBlock* block);
void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
ParallelMove* right);
- void FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
- GapInstruction* gap);
+ void FinalizeMoves(GapInstruction* gap);
Zone* const local_zone_;
InstructionSequence* const code_;
+ GapInstructions to_finalize_;
MoveOpVector temp_vector_0_;
MoveOpVector temp_vector_1_;
diff --git a/deps/v8/src/compiler/node-aux-data-inl.h b/deps/v8/src/compiler/node-aux-data-inl.h
deleted file mode 100644
index d8db4b949f..0000000000
--- a/deps/v8/src/compiler/node-aux-data-inl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_
-#define V8_COMPILER_NODE_AUX_DATA_INL_H_
-
-#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-aux-data.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class T>
-NodeAuxData<T>::NodeAuxData(Zone* zone)
- : aux_data_(zone) {}
-
-
-template <class T>
-void NodeAuxData<T>::Set(Node* node, const T& data) {
- int id = node->id();
- if (id >= static_cast<int>(aux_data_.size())) {
- aux_data_.resize(id + 1);
- }
- aux_data_[id] = data;
-}
-
-
-template <class T>
-T NodeAuxData<T>::Get(Node* node) const {
- int id = node->id();
- if (id >= static_cast<int>(aux_data_.size())) {
- return T();
- }
- return aux_data_[id];
-}
-}
-}
-} // namespace v8::internal::compiler
-
-#endif
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
index a08dc580ff..6c236aa4da 100644
--- a/deps/v8/src/compiler/node-aux-data.h
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_NODE_AUX_DATA_H_
#define V8_COMPILER_NODE_AUX_DATA_H_
+#include "src/compiler/node.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -12,22 +13,30 @@ namespace internal {
namespace compiler {
// Forward declarations.
-class Graph;
class Node;
template <class T>
class NodeAuxData {
public:
- inline explicit NodeAuxData(Zone* zone);
+ explicit NodeAuxData(Zone* zone) : aux_data_(zone) {}
- inline void Set(Node* node, const T& data);
- inline T Get(Node* node) const;
+ void Set(Node* node, T const& data) {
+ size_t const id = node->id();
+ if (id >= aux_data_.size()) aux_data_.resize(id + 1);
+ aux_data_[id] = data;
+ }
+
+ T Get(Node* node) const {
+ size_t const id = node->id();
+ return (id < aux_data_.size()) ? aux_data_[id] : T();
+ }
private:
ZoneVector<T> aux_data_;
};
-}
-}
-} // namespace v8::internal::compiler
-#endif
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_NODE_AUX_DATA_H_
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index 92a3fa078f..79c342b44e 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -36,7 +36,7 @@ bool NodeCache<Key, Hash, Pred>::Resize(Zone* zone) {
size_t old_size = size_ + kLinearProbe;
size_ *= 4;
size_t num_entries = size_ + kLinearProbe;
- entries_ = zone->NewArray<Entry>(static_cast<int>(num_entries));
+ entries_ = zone->NewArray<Entry>(num_entries);
memset(entries_, 0, sizeof(Entry) * num_entries);
// Insert the old entries into the new block.
@@ -66,7 +66,7 @@ Node** NodeCache<Key, Hash, Pred>::Find(Zone* zone, Key key) {
if (!entries_) {
// Allocate the initial entries and insert the first entry.
size_t num_entries = kInitialSize + kLinearProbe;
- entries_ = zone->NewArray<Entry>(static_cast<int>(num_entries));
+ entries_ = zone->NewArray<Entry>(num_entries);
size_ = kInitialSize;
memset(entries_, 0, sizeof(Entry) * num_entries);
Entry* entry = &entries_[hash & (kInitialSize - 1)];
diff --git a/deps/v8/src/compiler/node-marker.cc b/deps/v8/src/compiler/node-marker.cc
new file mode 100644
index 0000000000..4bf12f9751
--- /dev/null
+++ b/deps/v8/src/compiler/node-marker.cc
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-marker.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+NodeMarkerBase::NodeMarkerBase(Graph* graph, uint32_t num_states)
+ : mark_min_(graph->mark_max_), mark_max_(graph->mark_max_ += num_states) {
+ DCHECK(num_states > 0); // user error!
+ DCHECK(mark_max_ > mark_min_); // check for wraparound.
+}
+
+
+Mark NodeMarkerBase::Get(Node* node) {
+ Mark mark = node->mark();
+ if (mark < mark_min_) {
+ mark = mark_min_;
+ node->set_mark(mark_min_);
+ }
+ DCHECK_LT(mark, mark_max_);
+ return mark - mark_min_;
+}
+
+
+void NodeMarkerBase::Set(Node* node, Mark mark) {
+ DCHECK_LT(mark, mark_max_ - mark_min_);
+ DCHECK_LT(node->mark(), mark_max_);
+ node->set_mark(mark + mark_min_);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/node-marker.h b/deps/v8/src/compiler/node-marker.h
new file mode 100644
index 0000000000..837c92cfc2
--- /dev/null
+++ b/deps/v8/src/compiler/node-marker.h
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_MARKER_H_
+#define V8_COMPILER_NODE_MARKER_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+
+// Marks are used during traversal of the graph to distinguish states of nodes.
+// Each node has a mark which is a monotonically increasing integer, and a
+// {NodeMarker} has a range of values that indicate states of a node.
+typedef uint32_t Mark;
+
+
+// Base class for templatized NodeMarkers.
+class NodeMarkerBase {
+ public:
+ NodeMarkerBase(Graph* graph, uint32_t num_states);
+
+ Mark Get(Node* node);
+ void Set(Node* node, Mark mark);
+
+ private:
+ Mark mark_min_;
+ Mark mark_max_;
+
+ DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
+};
+
+
+// A NodeMarker uses monotonically increasing marks to assign local "states"
+// to nodes. Only one NodeMarker per graph is valid at a given time.
+template <typename State>
+class NodeMarker : public NodeMarkerBase {
+ public:
+ NodeMarker(Graph* graph, uint32_t num_states)
+ : NodeMarkerBase(graph, num_states) {}
+
+ State Get(Node* node) {
+ return static_cast<State>(NodeMarkerBase::Get(node));
+ }
+
+ void Set(Node* node, State state) {
+ NodeMarkerBase::Set(node, static_cast<Mark>(state));
+ }
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_NODE_MARKER_H_
diff --git a/deps/v8/src/compiler/node-properties-inl.h b/deps/v8/src/compiler/node-properties-inl.h
deleted file mode 100644
index 0d296141e1..0000000000
--- a/deps/v8/src/compiler/node-properties-inl.h
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_NODE_PROPERTIES_INL_H_
-#define V8_COMPILER_NODE_PROPERTIES_INL_H_
-
-#include "src/v8.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
-#include "src/compiler/operator-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// -----------------------------------------------------------------------------
-// Input layout.
-// Inputs are always arranged in order as follows:
-// 0 [ values, context, effects, control ] node->InputCount()
-
-inline int NodeProperties::FirstValueIndex(Node* node) { return 0; }
-
-inline int NodeProperties::FirstContextIndex(Node* node) {
- return PastValueIndex(node);
-}
-
-inline int NodeProperties::FirstFrameStateIndex(Node* node) {
- return PastContextIndex(node);
-}
-
-inline int NodeProperties::FirstEffectIndex(Node* node) {
- return PastFrameStateIndex(node);
-}
-
-inline int NodeProperties::FirstControlIndex(Node* node) {
- return PastEffectIndex(node);
-}
-
-
-inline int NodeProperties::PastValueIndex(Node* node) {
- return FirstValueIndex(node) + node->op()->ValueInputCount();
-}
-
-inline int NodeProperties::PastContextIndex(Node* node) {
- return FirstContextIndex(node) +
- OperatorProperties::GetContextInputCount(node->op());
-}
-
-inline int NodeProperties::PastFrameStateIndex(Node* node) {
- return FirstFrameStateIndex(node) +
- OperatorProperties::GetFrameStateInputCount(node->op());
-}
-
-inline int NodeProperties::PastEffectIndex(Node* node) {
- return FirstEffectIndex(node) + node->op()->EffectInputCount();
-}
-
-inline int NodeProperties::PastControlIndex(Node* node) {
- return FirstControlIndex(node) + node->op()->ControlInputCount();
-}
-
-
-// -----------------------------------------------------------------------------
-// Input accessors.
-
-inline Node* NodeProperties::GetValueInput(Node* node, int index) {
- DCHECK(0 <= index && index < node->op()->ValueInputCount());
- return node->InputAt(FirstValueIndex(node) + index);
-}
-
-inline Node* NodeProperties::GetContextInput(Node* node) {
- DCHECK(OperatorProperties::HasContextInput(node->op()));
- return node->InputAt(FirstContextIndex(node));
-}
-
-inline Node* NodeProperties::GetFrameStateInput(Node* node) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- return node->InputAt(FirstFrameStateIndex(node));
-}
-
-inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
- DCHECK(0 <= index && index < node->op()->EffectInputCount());
- return node->InputAt(FirstEffectIndex(node) + index);
-}
-
-inline Node* NodeProperties::GetControlInput(Node* node, int index) {
- DCHECK(0 <= index && index < node->op()->ControlInputCount());
- return node->InputAt(FirstControlIndex(node) + index);
-}
-
-inline int NodeProperties::GetFrameStateIndex(Node* node) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- return FirstFrameStateIndex(node);
-}
-
-// -----------------------------------------------------------------------------
-// Edge kinds.
-
-inline bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
- // TODO(titzer): edge.index() is linear time;
- // edges maybe need to be marked as value/effect/control.
- if (num == 0) return false;
- int index = edge.index();
- return first <= index && index < first + num;
-}
-
-inline bool NodeProperties::IsValueEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstValueIndex(node),
- node->op()->ValueInputCount());
-}
-
-inline bool NodeProperties::IsContextEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstContextIndex(node),
- OperatorProperties::GetContextInputCount(node->op()));
-}
-
-inline bool NodeProperties::IsEffectEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstEffectIndex(node),
- node->op()->EffectInputCount());
-}
-
-inline bool NodeProperties::IsControlEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstControlIndex(node),
- node->op()->ControlInputCount());
-}
-
-
-// -----------------------------------------------------------------------------
-// Miscellaneous predicates.
-
-inline bool NodeProperties::IsControl(Node* node) {
- return IrOpcode::IsControlOpcode(node->opcode());
-}
-
-
-// -----------------------------------------------------------------------------
-// Miscellaneous mutators.
-
-inline void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
- node->ReplaceInput(FirstControlIndex(node), control);
-}
-
-inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
- int index) {
- DCHECK(index < node->op()->EffectInputCount());
- return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
-}
-
-inline void NodeProperties::ReplaceFrameStateInput(Node* node,
- Node* frame_state) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
-}
-
-inline void NodeProperties::RemoveNonValueInputs(Node* node) {
- node->TrimInputCount(node->op()->ValueInputCount());
-}
-
-
-// Replace value uses of {node} with {value} and effect uses of {node} with
-// {effect}. If {effect == NULL}, then use the effect input to {node}.
-inline void NodeProperties::ReplaceWithValue(Node* node, Node* value,
- Node* effect) {
- DCHECK(node->op()->ControlOutputCount() == 0);
- if (effect == NULL && node->op()->EffectInputCount() > 0) {
- effect = NodeProperties::GetEffectInput(node);
- }
-
- // Requires distinguishing between value and effect edges.
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- DCHECK_NE(NULL, effect);
- edge.UpdateTo(effect);
- } else {
- edge.UpdateTo(value);
- }
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Type Bounds.
-
-inline bool NodeProperties::IsTyped(Node* node) {
- Bounds bounds = node->bounds();
- DCHECK((bounds.lower == NULL) == (bounds.upper == NULL));
- return bounds.upper != NULL;
-}
-
-inline Bounds NodeProperties::GetBounds(Node* node) {
- DCHECK(IsTyped(node));
- return node->bounds();
-}
-
-inline void NodeProperties::RemoveBounds(Node* node) {
- Bounds empty;
- node->set_bounds(empty);
-}
-
-inline void NodeProperties::SetBounds(Node* node, Bounds b) {
- DCHECK(b.lower != NULL && b.upper != NULL);
- node->set_bounds(b);
-}
-
-inline bool NodeProperties::AllValueInputsAreTyped(Node* node) {
- int input_count = node->op()->ValueInputCount();
- for (int i = 0; i < input_count; ++i) {
- if (!IsTyped(GetValueInput(node, i))) return false;
- }
- return true;
-}
-
-
-}
-}
-} // namespace v8::internal::compiler
-
-#endif // V8_COMPILER_NODE_PROPERTIES_INL_H_
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
new file mode 100644
index 0000000000..47de74e329
--- /dev/null
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -0,0 +1,247 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-properties.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// static
+int NodeProperties::PastValueIndex(Node* node) {
+ return FirstValueIndex(node) + node->op()->ValueInputCount();
+}
+
+
+// static
+int NodeProperties::PastContextIndex(Node* node) {
+ return FirstContextIndex(node) +
+ OperatorProperties::GetContextInputCount(node->op());
+}
+
+
+// static
+int NodeProperties::PastFrameStateIndex(Node* node) {
+ return FirstFrameStateIndex(node) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+}
+
+
+// static
+int NodeProperties::PastEffectIndex(Node* node) {
+ return FirstEffectIndex(node) + node->op()->EffectInputCount();
+}
+
+
+// static
+int NodeProperties::PastControlIndex(Node* node) {
+ return FirstControlIndex(node) + node->op()->ControlInputCount();
+}
+
+
+// static
+Node* NodeProperties::GetValueInput(Node* node, int index) {
+ DCHECK(0 <= index && index < node->op()->ValueInputCount());
+ return node->InputAt(FirstValueIndex(node) + index);
+}
+
+
+// static
+Node* NodeProperties::GetContextInput(Node* node) {
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ return node->InputAt(FirstContextIndex(node));
+}
+
+
+// static
+Node* NodeProperties::GetFrameStateInput(Node* node) {
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ return node->InputAt(FirstFrameStateIndex(node));
+}
+
+
+// static
+Node* NodeProperties::GetEffectInput(Node* node, int index) {
+ DCHECK(0 <= index && index < node->op()->EffectInputCount());
+ return node->InputAt(FirstEffectIndex(node) + index);
+}
+
+
+// static
+Node* NodeProperties::GetControlInput(Node* node, int index) {
+ DCHECK(0 <= index && index < node->op()->ControlInputCount());
+ return node->InputAt(FirstControlIndex(node) + index);
+}
+
+
+// static
+bool NodeProperties::IsValueEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstValueIndex(node),
+ node->op()->ValueInputCount());
+}
+
+
+// static
+bool NodeProperties::IsContextEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstContextIndex(node),
+ OperatorProperties::GetContextInputCount(node->op()));
+}
+
+
+// static
+bool NodeProperties::IsFrameStateEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstFrameStateIndex(node),
+ OperatorProperties::GetFrameStateInputCount(node->op()));
+}
+
+
+// static
+bool NodeProperties::IsEffectEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstEffectIndex(node),
+ node->op()->EffectInputCount());
+}
+
+
+// static
+bool NodeProperties::IsControlEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstControlIndex(node),
+ node->op()->ControlInputCount());
+}
+
+
+// static
+void NodeProperties::ReplaceContextInput(Node* node, Node* context) {
+ node->ReplaceInput(FirstContextIndex(node), context);
+}
+
+
+// static
+void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
+ node->ReplaceInput(FirstControlIndex(node), control);
+}
+
+
+// static
+void NodeProperties::ReplaceEffectInput(Node* node, Node* effect, int index) {
+ DCHECK(index < node->op()->EffectInputCount());
+ return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
+}
+
+
+// static
+void NodeProperties::ReplaceFrameStateInput(Node* node, Node* frame_state) {
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
+}
+
+
+// static
+void NodeProperties::RemoveNonValueInputs(Node* node) {
+ node->TrimInputCount(node->op()->ValueInputCount());
+}
+
+
+// static
+void NodeProperties::ReplaceWithValue(Node* node, Node* value, Node* effect) {
+ DCHECK(node->op()->ControlOutputCount() == 0);
+ if (!effect && node->op()->EffectInputCount() > 0) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
+
+ // Requires distinguishing between value and effect edges.
+ for (Edge edge : node->use_edges()) {
+ if (IsEffectEdge(edge)) {
+ DCHECK_NOT_NULL(effect);
+ edge.UpdateTo(effect);
+ } else {
+ edge.UpdateTo(value);
+ }
+ }
+}
+
+
+// static
+Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
+ for (auto use : node->uses()) {
+ if (use->opcode() == IrOpcode::kProjection &&
+ ProjectionIndexOf(use->op()) == projection_index) {
+ return use;
+ }
+ }
+ return nullptr;
+}
+
+
+// static
+void NodeProperties::CollectControlProjections(Node* node, Node** projections,
+ size_t projection_count) {
+#ifdef DEBUG
+ DCHECK_EQ(static_cast<int>(projection_count), node->UseCount());
+ std::memset(projections, 0, sizeof(*projections) * projection_count);
+#endif
+ size_t if_value_index = 0;
+ for (Node* const use : node->uses()) {
+ size_t index;
+ switch (use->opcode()) {
+ default:
+ UNREACHABLE();
+ // Fall through.
+ case IrOpcode::kIfTrue:
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ index = 0;
+ break;
+ case IrOpcode::kIfFalse:
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ index = 1;
+ break;
+ case IrOpcode::kIfValue:
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ index = if_value_index++;
+ break;
+ case IrOpcode::kIfDefault:
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ index = projection_count - 1;
+ break;
+ }
+ DCHECK_LT(if_value_index, projection_count);
+ DCHECK_LT(index, projection_count);
+ DCHECK_NULL(projections[index]);
+ projections[index] = use;
+ }
+#ifdef DEBUG
+ for (size_t index = 0; index < projection_count; ++index) {
+ DCHECK_NOT_NULL(projections[index]);
+ }
+#endif
+}
+
+
+// static
+bool NodeProperties::AllValueInputsAreTyped(Node* node) {
+ int input_count = node->op()->ValueInputCount();
+ for (int index = 0; index < input_count; ++index) {
+ if (!IsTyped(GetValueInput(node, index))) return false;
+ }
+ return true;
+}
+
+
+// static
+bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
+ if (num == 0) return false;
+ int const index = edge.index();
+ return first <= index && index < first + num;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 025be7857c..a13eea3a02 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -15,48 +15,109 @@ namespace compiler {
class Operator;
// A facade that simplifies access to the different kinds of inputs to a node.
-class NodeProperties {
+class NodeProperties FINAL {
public:
- static inline Node* GetValueInput(Node* node, int index);
- static inline Node* GetContextInput(Node* node);
- static inline Node* GetFrameStateInput(Node* node);
- static inline Node* GetEffectInput(Node* node, int index = 0);
- static inline Node* GetControlInput(Node* node, int index = 0);
-
- static inline int GetFrameStateIndex(Node* node);
-
- static inline bool IsValueEdge(Edge edge);
- static inline bool IsContextEdge(Edge edge);
- static inline bool IsEffectEdge(Edge edge);
- static inline bool IsControlEdge(Edge edge);
-
- static inline bool IsControl(Node* node);
-
- static inline void ReplaceControlInput(Node* node, Node* control);
- static inline void ReplaceEffectInput(Node* node, Node* effect,
- int index = 0);
- static inline void ReplaceFrameStateInput(Node* node, Node* frame_state);
- static inline void RemoveNonValueInputs(Node* node);
- static inline void ReplaceWithValue(Node* node, Node* value,
- Node* effect = NULL);
-
- static inline bool IsTyped(Node* node);
- static inline Bounds GetBounds(Node* node);
- static inline void SetBounds(Node* node, Bounds bounds);
- static inline void RemoveBounds(Node* node);
- static inline bool AllValueInputsAreTyped(Node* node);
-
- static inline int FirstValueIndex(Node* node);
- static inline int FirstContextIndex(Node* node);
- static inline int FirstFrameStateIndex(Node* node);
- static inline int FirstEffectIndex(Node* node);
- static inline int FirstControlIndex(Node* node);
- static inline int PastValueIndex(Node* node);
- static inline int PastContextIndex(Node* node);
- static inline int PastFrameStateIndex(Node* node);
- static inline int PastEffectIndex(Node* node);
- static inline int PastControlIndex(Node* node);
+ // ---------------------------------------------------------------------------
+ // Input layout.
+ // Inputs are always arranged in order as follows:
+ // 0 [ values, context, frame state, effects, control ] node->InputCount()
+ static int FirstValueIndex(Node* node) { return 0; }
+ static int FirstContextIndex(Node* node) { return PastValueIndex(node); }
+ static int FirstFrameStateIndex(Node* node) { return PastContextIndex(node); }
+ static int FirstEffectIndex(Node* node) { return PastFrameStateIndex(node); }
+ static int FirstControlIndex(Node* node) { return PastEffectIndex(node); }
+ static int PastValueIndex(Node* node);
+ static int PastContextIndex(Node* node);
+ static int PastFrameStateIndex(Node* node);
+ static int PastEffectIndex(Node* node);
+ static int PastControlIndex(Node* node);
+
+
+ // ---------------------------------------------------------------------------
+ // Input accessors.
+
+ static Node* GetValueInput(Node* node, int index);
+ static Node* GetContextInput(Node* node);
+ static Node* GetFrameStateInput(Node* node);
+ static Node* GetEffectInput(Node* node, int index = 0);
+ static Node* GetControlInput(Node* node, int index = 0);
+
+
+ // ---------------------------------------------------------------------------
+ // Edge kinds.
+
+ static bool IsValueEdge(Edge edge);
+ static bool IsContextEdge(Edge edge);
+ static bool IsFrameStateEdge(Edge edge);
+ static bool IsEffectEdge(Edge edge);
+ static bool IsControlEdge(Edge edge);
+
+
+ // ---------------------------------------------------------------------------
+ // Miscellaneous predicates.
+
+ static bool IsCommon(Node* node) {
+ return IrOpcode::IsCommonOpcode(node->opcode());
+ }
+ static bool IsControl(Node* node) {
+ return IrOpcode::IsControlOpcode(node->opcode());
+ }
+ static bool IsConstant(Node* node) {
+ return IrOpcode::IsConstantOpcode(node->opcode());
+ }
+ static bool IsPhi(Node* node) {
+ return IrOpcode::IsPhiOpcode(node->opcode());
+ }
+
+
+ // ---------------------------------------------------------------------------
+ // Miscellaneous mutators.
+
+ static void ReplaceContextInput(Node* node, Node* context);
+ static void ReplaceControlInput(Node* node, Node* control);
+ static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
+ static void ReplaceFrameStateInput(Node* node, Node* frame_state);
+ static void RemoveNonValueInputs(Node* node);
+
+ // Replace value uses of {node} with {value} and effect uses of {node} with
+ // {effect}. If {effect == NULL}, then use the effect input to {node}.
+ static void ReplaceWithValue(Node* node, Node* value, Node* effect = nullptr);
+
+
+ // ---------------------------------------------------------------------------
+ // Miscellaneous utilities.
+
+ static Node* FindProjection(Node* node, size_t projection_index);
+
+ // Collect the branch-related projections from a node, such as IfTrue,
+ // IfFalse, IfValue and IfDefault.
+ // - Branch: [ IfTrue, IfFalse ]
+ // - Switch: [ IfValue, ..., IfDefault ]
+ static void CollectControlProjections(Node* node, Node** proj, size_t count);
+
+
+ // ---------------------------------------------------------------------------
+ // Type Bounds.
+
+ static bool IsTyped(Node* node) {
+ Bounds const bounds = node->bounds();
+ DCHECK(!bounds.lower == !bounds.upper);
+ return bounds.upper;
+ }
+ static Bounds GetBounds(Node* node) {
+ DCHECK(IsTyped(node));
+ return node->bounds();
+ }
+ static void SetBounds(Node* node, Bounds bounds) {
+ DCHECK_NOT_NULL(bounds.lower);
+ DCHECK_NOT_NULL(bounds.upper);
+ node->set_bounds(bounds);
+ }
+ static void RemoveBounds(Node* node) { node->set_bounds(Bounds()); }
+ static bool AllValueInputsAreTyped(Node* node);
+
+ private:
static inline bool IsInputRange(Edge edge, int first, int count);
};
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 8f44c24fd2..d38e9ceff7 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -4,37 +4,23 @@
#include "src/compiler/node.h"
-#include "src/compiler/graph.h"
-#include "src/zone.h"
+#include <algorithm>
namespace v8 {
namespace internal {
namespace compiler {
-Node::Node(NodeId id, int input_count, int reserved_input_count)
- : id_(id),
- bit_field_(InputCountField::encode(input_count) |
- ReservedInputCountField::encode(reserved_input_count) |
- HasAppendableInputsField::encode(false)),
- first_use_(nullptr),
- last_use_(nullptr) {
- inputs_.static_ = reinterpret_cast<Input*>(this + 1);
-}
-
-
-Node* Node::New(Graph* graph, int input_count, Node** inputs,
- bool has_extensible_inputs) {
- size_t node_size = sizeof(Node);
+Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
+ Node** inputs, bool has_extensible_inputs) {
+ size_t node_size = sizeof(Node) - sizeof(Input);
int reserve_input_count = has_extensible_inputs ? kDefaultReservedInputs : 0;
- size_t inputs_size = (input_count + reserve_input_count) * sizeof(Input);
+ size_t inputs_size = std::max<size_t>(
+ (input_count + reserve_input_count) * sizeof(Input), sizeof(InputDeque*));
size_t uses_size = input_count * sizeof(Use);
int size = static_cast<int>(node_size + inputs_size + uses_size);
- Zone* zone = graph->zone();
void* buffer = zone->New(size);
- Node* result =
- new (buffer) Node(graph->NextNodeID(), input_count, reserve_input_count);
- Input* input =
- reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+ Node* result = new (buffer) Node(id, op, input_count, reserve_input_count);
+ Input* input = result->inputs_.static_;
Use* use =
reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
@@ -59,28 +45,67 @@ void Node::Kill() {
}
-void Node::CollectProjections(NodeVector* projections) {
- for (size_t i = 0; i < projections->size(); i++) {
- (*projections)[i] = NULL;
+void Node::AppendInput(Zone* zone, Node* new_to) {
+ DCHECK_NOT_NULL(zone);
+ DCHECK_NOT_NULL(new_to);
+ Use* new_use = new (zone) Use;
+ Input new_input;
+ new_input.to = new_to;
+ new_input.use = new_use;
+ if (reserved_input_count() > 0) {
+ DCHECK(!has_appendable_inputs());
+ set_reserved_input_count(reserved_input_count() - 1);
+ inputs_.static_[input_count()] = new_input;
+ } else {
+ EnsureAppendableInputs(zone);
+ inputs_.appendable_->push_back(new_input);
}
- for (UseIter i = uses().begin(); i != uses().end(); ++i) {
- if ((*i)->opcode() != IrOpcode::kProjection) continue;
- size_t index = OpParameter<size_t>(*i);
- DCHECK_LT(index, projections->size());
- DCHECK_EQ(NULL, (*projections)[index]);
- (*projections)[index] = *i;
+ new_use->input_index = input_count();
+ new_use->from = this;
+ new_to->AppendUse(new_use);
+ set_input_count(input_count() + 1);
+}
+
+
+void Node::InsertInput(Zone* zone, int index, Node* new_to) {
+ DCHECK_NOT_NULL(zone);
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, InputCount());
+ AppendInput(zone, InputAt(InputCount() - 1));
+ for (int i = InputCount() - 1; i > index; --i) {
+ ReplaceInput(i, InputAt(i - 1));
}
+ ReplaceInput(index, new_to);
}
-Node* Node::FindProjection(size_t projection_index) {
- for (UseIter i = uses().begin(); i != uses().end(); ++i) {
- if ((*i)->opcode() == IrOpcode::kProjection &&
- OpParameter<size_t>(*i) == projection_index) {
- return *i;
- }
+void Node::RemoveInput(int index) {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, InputCount());
+ for (; index < InputCount() - 1; ++index) {
+ ReplaceInput(index, InputAt(index + 1));
+ }
+ TrimInputCount(InputCount() - 1);
+}
+
+
+void Node::RemoveAllInputs() {
+ for (Edge edge : input_edges()) edge.UpdateTo(nullptr);
+}
+
+
+void Node::TrimInputCount(int new_input_count) {
+ DCHECK_LE(new_input_count, input_count());
+ if (new_input_count == input_count()) return; // Nothing to do.
+ for (int index = new_input_count; index < input_count(); ++index) {
+ ReplaceInput(index, nullptr);
+ }
+ if (!has_appendable_inputs()) {
+ set_reserved_input_count(std::min<int>(
+ ReservedInputCountField::kMax,
+ reserved_input_count() + (input_count() - new_input_count)));
}
- return NULL;
+ set_input_count(new_input_count);
}
@@ -96,11 +121,100 @@ int Node::UseCount() const {
Node* Node::UseAt(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, UseCount());
- Use* current = first_use_;
+ const Use* use = first_use_;
while (index-- != 0) {
- current = current->next;
+ use = use->next;
+ }
+ return use->from;
+}
+
+
+void Node::ReplaceUses(Node* replace_to) {
+ for (Use* use = first_use_; use; use = use->next) {
+ use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+ }
+ if (!replace_to->last_use_) {
+ DCHECK(!replace_to->first_use_);
+ replace_to->first_use_ = first_use_;
+ replace_to->last_use_ = last_use_;
+ } else if (first_use_) {
+ DCHECK_NOT_NULL(replace_to->first_use_);
+ replace_to->last_use_->next = first_use_;
+ first_use_->prev = replace_to->last_use_;
+ replace_to->last_use_ = last_use_;
+ }
+ first_use_ = nullptr;
+ last_use_ = nullptr;
+}
+
+
+void Node::Input::Update(Node* new_to) {
+ Node* old_to = this->to;
+ if (new_to == old_to) return; // Nothing to do.
+ // Snip out the use from where it used to be
+ if (old_to) {
+ old_to->RemoveUse(use);
+ }
+ to = new_to;
+ // And put it into the new node's use list.
+ if (new_to) {
+ new_to->AppendUse(use);
+ } else {
+ use->next = nullptr;
+ use->prev = nullptr;
+ }
+}
+
+
+Node::Node(NodeId id, const Operator* op, int input_count,
+ int reserved_input_count)
+ : op_(op),
+ mark_(0),
+ id_(id),
+ bit_field_(InputCountField::encode(input_count) |
+ ReservedInputCountField::encode(reserved_input_count) |
+ HasAppendableInputsField::encode(false)),
+ first_use_(nullptr),
+ last_use_(nullptr) {}
+
+
+void Node::EnsureAppendableInputs(Zone* zone) {
+ if (!has_appendable_inputs()) {
+ void* deque_buffer = zone->New(sizeof(InputDeque));
+ InputDeque* deque = new (deque_buffer) InputDeque(zone);
+ for (int i = 0; i < input_count(); ++i) {
+ deque->push_back(inputs_.static_[i]);
+ }
+ inputs_.appendable_ = deque;
+ set_has_appendable_inputs(true);
+ }
+}
+
+
+void Node::AppendUse(Use* const use) {
+ use->next = nullptr;
+ use->prev = last_use_;
+ if (last_use_) {
+ last_use_->next = use;
+ } else {
+ first_use_ = use;
+ }
+ last_use_ = use;
+}
+
+
+void Node::RemoveUse(Use* const use) {
+ if (use == last_use_) {
+ last_use_ = use->prev;
+ }
+ if (use->prev) {
+ use->prev->next = use->next;
+ } else {
+ first_use_ = use->next;
+ }
+ if (use->next) {
+ use->next->prev = use->prev;
}
- return current->from;
}
@@ -117,6 +231,46 @@ std::ostream& operator<<(std::ostream& os, const Node& n) {
return os;
}
+
+Node::InputEdges::iterator Node::InputEdges::iterator::operator++(int n) {
+ iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::InputEdges::empty() const { return begin() == end(); }
+
+
+Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
+ const_iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::Inputs::empty() const { return begin() == end(); }
+
+
+Node::UseEdges::iterator Node::UseEdges::iterator::operator++(int n) {
+ iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::UseEdges::empty() const { return begin() == end(); }
+
+
+Node::Uses::const_iterator Node::Uses::const_iterator::operator++(int n) {
+ const_iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::Uses::empty() const { return begin() == end(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 2295b7b5e3..57a0ebb72e 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -5,17 +5,9 @@
#ifndef V8_COMPILER_NODE_H_
#define V8_COMPILER_NODE_H_
-#include <deque>
-#include <set>
-#include <vector>
-
-#include "src/v8.h"
-
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/types.h"
-#include "src/zone.h"
-#include "src/zone-allocator.h"
+#include "src/types-inl.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -32,9 +24,11 @@ class Graph;
// {NodeMarker} has a range of values that indicate states of a node.
typedef uint32_t Mark;
+
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
-typedef int NodeId;
+typedef int32_t NodeId;
+
// A Node is the basic primitive of graphs. Nodes are chained together by
// input/use chains but by default otherwise contain only an identifying number
@@ -47,17 +41,12 @@ typedef int NodeId;
// by the Node's id.
class Node FINAL {
public:
- void Initialize(const Operator* op) {
- set_op(op);
- set_mark(0);
- }
+ static Node* New(Zone* zone, NodeId id, const Operator* op, int input_count,
+ Node** inputs, bool has_extensible_inputs);
- bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
+ bool IsDead() const { return InputCount() > 0 && !InputAt(0); }
void Kill();
- void CollectProjections(ZoneVector<Node*>* projections);
- Node* FindProjection(size_t projection_index);
-
const Operator* op() const { return op_; }
void set_op(const Operator* op) { op_ = op; }
@@ -70,25 +59,25 @@ class Node FINAL {
int InputCount() const { return input_count(); }
Node* InputAt(int index) const { return GetInputRecordPtr(index)->to; }
- inline void ReplaceInput(int index, Node* new_input);
- inline void AppendInput(Zone* zone, Node* new_input);
- inline void InsertInput(Zone* zone, int index, Node* new_input);
- inline void RemoveInput(int index);
+ inline void ReplaceInput(int index, Node* new_to);
+ void AppendInput(Zone* zone, Node* new_to);
+ void InsertInput(Zone* zone, int index, Node* new_to);
+ void RemoveInput(int index);
+ void RemoveAllInputs();
+ void TrimInputCount(int new_input_count);
int UseCount() const;
Node* UseAt(int index) const;
- inline void ReplaceUses(Node* replace_to);
- template <class UnaryPredicate>
- inline void ReplaceUsesIf(UnaryPredicate pred, Node* replace_to);
- inline void RemoveAllInputs();
+ void ReplaceUses(Node* replace_to);
- inline void TrimInputCount(int input_count);
-
- class InputEdges {
+ class InputEdges FINAL {
public:
+ typedef Edge value_type;
+
class iterator;
- iterator begin() const;
- iterator end() const;
+ inline iterator begin() const;
+ inline iterator end() const;
+
bool empty() const;
explicit InputEdges(Node* node) : node_(node) {}
@@ -97,11 +86,16 @@ class Node FINAL {
Node* node_;
};
- class Inputs {
+ InputEdges input_edges() { return InputEdges(this); }
+
+ class Inputs FINAL {
public:
- class iterator;
- iterator begin() const;
- iterator end() const;
+ typedef Node* value_type;
+
+ class const_iterator;
+ inline const_iterator begin() const;
+ inline const_iterator end() const;
+
bool empty() const;
explicit Inputs(Node* node) : node_(node) {}
@@ -111,13 +105,15 @@ class Node FINAL {
};
Inputs inputs() { return Inputs(this); }
- InputEdges input_edges() { return InputEdges(this); }
- class UseEdges {
+ class UseEdges FINAL {
public:
+ typedef Edge value_type;
+
class iterator;
- iterator begin() const;
- iterator end() const;
+ inline iterator begin() const;
+ inline iterator end() const;
+
bool empty() const;
explicit UseEdges(Node* node) : node_(node) {}
@@ -126,11 +122,16 @@ class Node FINAL {
Node* node_;
};
- class Uses {
+ UseEdges use_edges() { return UseEdges(this); }
+
+ class Uses FINAL {
public:
- class iterator;
- iterator begin() const;
- iterator end() const;
+ typedef Node* value_type;
+
+ class const_iterator;
+ inline const_iterator begin() const;
+ inline const_iterator end() const;
+
bool empty() const;
explicit Uses(Node* node) : node_(node) {}
@@ -140,26 +141,21 @@ class Node FINAL {
};
Uses uses() { return Uses(this); }
- UseEdges use_edges() { return UseEdges(this); }
-
- bool OwnedBy(Node* owner) const;
- static Node* New(Graph* graph, int input_count, Node** inputs,
- bool has_extensible_inputs);
-
- protected:
- friend class Graph;
- friend class Edge;
+ // Returns true if {owner} is the user of {this} node.
+ bool OwnedBy(Node* owner) const {
+ return first_use_ && first_use_->from == owner && !first_use_->next;
+ }
- class Use : public ZoneObject {
- public:
+ private:
+ struct Use FINAL : public ZoneObject {
Node* from;
Use* next;
Use* prev;
int input_index;
};
- class Input {
+ class Input FINAL {
public:
Node* to;
Use* use;
@@ -167,30 +163,27 @@ class Node FINAL {
void Update(Node* new_to);
};
- void EnsureAppendableInputs(Zone* zone);
+ inline Node(NodeId id, const Operator* op, int input_count,
+ int reserve_input_count);
- Input* GetInputRecordPtr(int index) const {
- if (has_appendable_inputs()) {
- return &((*inputs_.appendable_)[index]);
- } else {
- return &inputs_.static_[index];
- }
+ inline void EnsureAppendableInputs(Zone* zone);
+
+ Input* GetInputRecordPtr(int index) {
+ return has_appendable_inputs() ? &((*inputs_.appendable_)[index])
+ : &inputs_.static_[index];
+ }
+ const Input* GetInputRecordPtr(int index) const {
+ return has_appendable_inputs() ? &((*inputs_.appendable_)[index])
+ : &inputs_.static_[index];
}
- inline void AppendUse(Use* use);
- inline void RemoveUse(Use* use);
+ inline void AppendUse(Use* const use);
+ inline void RemoveUse(Use* const use);
void* operator new(size_t, void* location) { return location; }
- private:
- inline Node(NodeId id, int input_count, int reserve_input_count);
-
typedef ZoneDeque<Input> InputDeque;
- friend class NodeProperties;
- template <typename State>
- friend class NodeMarker;
-
// Only NodeProperties should manipulate the bounds.
Bounds bounds() { return bounds_; }
void set_bounds(Bounds b) { bounds_ = b; }
@@ -230,33 +223,53 @@ class Node FINAL {
const Operator* op_;
Bounds bounds_;
Mark mark_;
- NodeId id_;
+ NodeId const id_;
unsigned bit_field_;
+ Use* first_use_;
+ Use* last_use_;
union {
// When a node is initially allocated, it uses a static buffer to hold its
// inputs under the assumption that the number of outputs will not increase.
// When the first input is appended, the static buffer is converted into a
// deque to allow for space-efficient growing.
- Input* static_;
+ Input static_[1];
InputDeque* appendable_;
} inputs_;
- Use* first_use_;
- Use* last_use_;
+
+ friend class Edge;
+ friend class NodeMarkerBase;
+ friend class NodeProperties;
DISALLOW_COPY_AND_ASSIGN(Node);
};
+std::ostream& operator<<(std::ostream& os, const Node& n);
+
+
+// Typedefs to shorten commonly used Node containers.
+typedef ZoneDeque<Node*> NodeDeque;
+typedef ZoneVector<Node*> NodeVector;
+typedef ZoneVector<NodeVector> NodeVectorVector;
+
+
+// Helper to extract parameters from Operator1<*> nodes.
+template <typename T>
+static inline const T& OpParameter(const Node* node) {
+ return OpParameter<T>(node->op());
+}
+
+
// An encapsulation for information associated with a single use of node as a
// input from another node, allowing access to both the defining node and
// the node having the input.
-class Edge {
+class Edge FINAL {
public:
Node* from() const { return input_->use->from; }
Node* to() const { return input_->to; }
int index() const {
- int index = input_->use->input_index;
- DCHECK(index < input_->use->from->input_count());
+ int const index = input_->use->input_index;
+ DCHECK_LT(index, input_->use->from->input_count());
return index;
}
@@ -266,59 +279,51 @@ class Edge {
void UpdateTo(Node* new_to) { input_->Update(new_to); }
private:
- friend class Node::Uses::iterator;
- friend class Node::Inputs::iterator;
friend class Node::UseEdges::iterator;
friend class Node::InputEdges::iterator;
- explicit Edge(Node::Input* input) : input_(input) {}
+ explicit Edge(Node::Input* input) : input_(input) { DCHECK_NOT_NULL(input); }
Node::Input* input_;
};
-// A forward iterator to visit the edges for the input dependencies of a node..
-class Node::InputEdges::iterator {
+// A forward iterator to visit the edges for the input dependencies of a node.
+class Node::InputEdges::iterator FINAL {
public:
typedef std::forward_iterator_tag iterator_category;
typedef int difference_type;
typedef Edge value_type;
typedef Edge* pointer;
typedef Edge& reference;
- iterator(const Node::InputEdges::iterator& other) // NOLINT
- : input_(other.input_) {}
- iterator() : input_(NULL) {}
+
+ iterator() : input_(nullptr) {}
+ iterator(const iterator& other) : input_(other.input_) {}
Edge operator*() const { return Edge(input_); }
- bool operator==(const iterator& other) const { return Equals(other); }
- bool operator!=(const iterator& other) const { return !Equals(other); }
+ bool operator==(const iterator& other) const {
+ return input_ == other.input_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
iterator& operator++() {
- DCHECK(input_ != NULL);
- Edge edge(input_);
- Node* from = edge.from();
- SetInput(from, input_->use->input_index + 1);
+ SetInput(Edge(input_).from(), input_->use->input_index + 1);
return *this;
}
- iterator operator++(int) {
- iterator result(*this);
- ++(*this);
- return result;
- }
+ iterator operator++(int);
private:
friend class Node;
- explicit iterator(Node* from, int index = 0) : input_(NULL) {
+ explicit iterator(Node* from, int index = 0) : input_(nullptr) {
SetInput(from, index);
}
- bool Equals(const iterator& other) const { return other.input_ == input_; }
void SetInput(Node* from, int index) {
DCHECK(index >= 0 && index <= from->InputCount());
if (index < from->InputCount()) {
input_ = from->GetInputRecordPtr(index);
} else {
- input_ = NULL;
+ input_ = nullptr;
}
}
@@ -326,8 +331,18 @@ class Node::InputEdges::iterator {
};
+Node::InputEdges::iterator Node::InputEdges::begin() const {
+ return Node::InputEdges::iterator(this->node_, 0);
+}
+
+
+Node::InputEdges::iterator Node::InputEdges::end() const {
+ return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
+}
+
+
// A forward iterator to visit the inputs of a node.
-class Node::Inputs::iterator {
+class Node::Inputs::const_iterator FINAL {
public:
typedef std::forward_iterator_tag iterator_category;
typedef int difference_type;
@@ -335,323 +350,133 @@ class Node::Inputs::iterator {
typedef Node** pointer;
typedef Node*& reference;
- iterator(const Node::Inputs::iterator& other) // NOLINT
- : iter_(other.iter_) {}
+ const_iterator(const const_iterator& other) : iter_(other.iter_) {}
Node* operator*() const { return (*iter_).to(); }
- bool operator==(const iterator& other) const { return Equals(other); }
- bool operator!=(const iterator& other) const { return !Equals(other); }
- iterator& operator++() {
+ bool operator==(const const_iterator& other) const {
+ return iter_ == other.iter_;
+ }
+ bool operator!=(const const_iterator& other) const {
+ return !(*this == other);
+ }
+ const_iterator& operator++() {
++iter_;
return *this;
}
- iterator operator++(int) {
- iterator result(*this);
- ++(*this);
- return result;
- }
-
+ const_iterator operator++(int);
private:
friend class Node::Inputs;
- explicit iterator(Node* node, int index) : iter_(node, index) {}
-
- bool Equals(const iterator& other) const { return other.iter_ == iter_; }
+ const_iterator(Node* node, int index) : iter_(node, index) {}
Node::InputEdges::iterator iter_;
};
+
+Node::Inputs::const_iterator Node::Inputs::begin() const {
+ return const_iterator(this->node_, 0);
+}
+
+
+Node::Inputs::const_iterator Node::Inputs::end() const {
+ return const_iterator(this->node_, this->node_->InputCount());
+}
+
+
// A forward iterator to visit the uses edges of a node. The edges are returned
// in
// the order in which they were added as inputs.
-class Node::UseEdges::iterator {
+class Node::UseEdges::iterator FINAL {
public:
- iterator(const Node::UseEdges::iterator& other) // NOLINT
- : current_(other.current_),
- next_(other.next_) {}
+ iterator(const iterator& other)
+ : current_(other.current_), next_(other.next_) {}
- Edge operator*() const { return Edge(CurrentInput()); }
+ Edge operator*() const {
+ return Edge(current_->from->GetInputRecordPtr(current_->input_index));
+ }
- bool operator==(const iterator& other) { return Equals(other); }
- bool operator!=(const iterator& other) { return !Equals(other); }
+ bool operator==(const iterator& other) const {
+ return current_ == other.current_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
iterator& operator++() {
- DCHECK(current_ != NULL);
+ DCHECK_NOT_NULL(current_);
current_ = next_;
- next_ = (current_ == NULL) ? NULL : current_->next;
+ next_ = current_ ? current_->next : nullptr;
return *this;
}
- iterator operator++(int) {
- iterator result(*this);
- ++(*this);
- return result;
- }
+ iterator operator++(int);
private:
friend class Node::UseEdges;
- iterator() : current_(NULL), next_(NULL) {}
+ iterator() : current_(nullptr), next_(nullptr) {}
explicit iterator(Node* node)
: current_(node->first_use_),
- next_(current_ == NULL ? NULL : current_->next) {}
-
- bool Equals(const iterator& other) const {
- return other.current_ == current_;
- }
-
- Input* CurrentInput() const {
- return current_->from->GetInputRecordPtr(current_->input_index);
- }
+ next_(current_ ? current_->next : nullptr) {}
Node::Use* current_;
Node::Use* next_;
};
-// A forward iterator to visit the uses of a node. The uses are returned in
-// the order in which they were added as inputs.
-class Node::Uses::iterator {
- public:
- iterator(const Node::Uses::iterator& other) // NOLINT
- : current_(other.current_) {}
-
- Node* operator*() { return current_->from; }
-
- bool operator==(const iterator& other) { return other.current_ == current_; }
- bool operator!=(const iterator& other) { return other.current_ != current_; }
- iterator& operator++() {
- DCHECK(current_ != NULL);
- current_ = current_->next;
- return *this;
- }
-
- private:
- friend class Node::Uses;
-
- iterator() : current_(NULL) {}
- explicit iterator(Node* node) : current_(node->first_use_) {}
-
- Input* CurrentInput() const {
- return current_->from->GetInputRecordPtr(current_->input_index);
- }
-
- Node::Use* current_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const Node& n);
-
-typedef std::set<Node*, std::less<Node*>, zone_allocator<Node*> > NodeSet;
-typedef NodeSet::iterator NodeSetIter;
-typedef NodeSet::reverse_iterator NodeSetRIter;
-
-typedef ZoneDeque<Node*> NodeDeque;
-
-typedef ZoneVector<Node*> NodeVector;
-typedef NodeVector::iterator NodeVectorIter;
-typedef NodeVector::const_iterator NodeVectorConstIter;
-typedef NodeVector::reverse_iterator NodeVectorRIter;
-
-typedef ZoneVector<NodeVector> NodeVectorVector;
-typedef NodeVectorVector::iterator NodeVectorVectorIter;
-typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter;
-
-typedef Node::Uses::iterator UseIter;
-typedef Node::Inputs::iterator InputIter;
-
-// Helper to extract parameters from Operator1<*> nodes.
-template <typename T>
-static inline const T& OpParameter(const Node* node) {
- return OpParameter<T>(node->op());
-}
-
-inline Node::InputEdges::iterator Node::InputEdges::begin() const {
- return Node::InputEdges::iterator(this->node_, 0);
-}
-
-inline Node::InputEdges::iterator Node::InputEdges::end() const {
- return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
-}
-
-inline Node::Inputs::iterator Node::Inputs::begin() const {
- return Node::Inputs::iterator(this->node_, 0);
-}
-
-inline Node::Inputs::iterator Node::Inputs::end() const {
- return Node::Inputs::iterator(this->node_, this->node_->InputCount());
-}
-
-inline Node::UseEdges::iterator Node::UseEdges::begin() const {
+Node::UseEdges::iterator Node::UseEdges::begin() const {
return Node::UseEdges::iterator(this->node_);
}
-inline Node::UseEdges::iterator Node::UseEdges::end() const {
- return Node::UseEdges::iterator();
-}
-inline Node::Uses::iterator Node::Uses::begin() const {
- return Node::Uses::iterator(this->node_);
+Node::UseEdges::iterator Node::UseEdges::end() const {
+ return Node::UseEdges::iterator();
}
-inline Node::Uses::iterator Node::Uses::end() const {
- return Node::Uses::iterator();
-}
-inline bool Node::InputEdges::empty() const { return begin() == end(); }
-inline bool Node::Uses::empty() const { return begin() == end(); }
-inline bool Node::UseEdges::empty() const { return begin() == end(); }
-inline bool Node::Inputs::empty() const { return begin() == end(); }
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+class Node::Uses::const_iterator FINAL {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef int difference_type;
+ typedef Node* value_type;
+ typedef Node** pointer;
+ typedef Node*& reference;
-inline void Node::ReplaceUses(Node* replace_to) {
- for (Use* use = first_use_; use != NULL; use = use->next) {
- use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
- }
- if (replace_to->last_use_ == NULL) {
- DCHECK_EQ(NULL, replace_to->first_use_);
- replace_to->first_use_ = first_use_;
- replace_to->last_use_ = last_use_;
- } else if (first_use_ != NULL) {
- DCHECK_NE(NULL, replace_to->first_use_);
- replace_to->last_use_->next = first_use_;
- first_use_->prev = replace_to->last_use_;
- replace_to->last_use_ = last_use_;
- }
- first_use_ = NULL;
- last_use_ = NULL;
-}
+ const_iterator(const const_iterator& other) : current_(other.current_) {}
-template <class UnaryPredicate>
-inline void Node::ReplaceUsesIf(UnaryPredicate pred, Node* replace_to) {
- for (Use* use = first_use_; use != NULL;) {
- Use* next = use->next;
- if (pred(use->from)) {
- RemoveUse(use);
- replace_to->AppendUse(use);
- use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
- }
- use = next;
+ Node* operator*() const { return current_->from; }
+ bool operator==(const const_iterator& other) const {
+ return other.current_ == current_;
}
-}
-
-inline void Node::RemoveAllInputs() {
- for (Edge edge : input_edges()) {
- edge.UpdateTo(NULL);
+ bool operator!=(const const_iterator& other) const {
+ return other.current_ != current_;
}
-}
-
-inline void Node::TrimInputCount(int new_input_count) {
- if (new_input_count == input_count()) return; // Nothing to do.
-
- DCHECK(new_input_count < input_count());
-
- // Update inline inputs.
- for (int i = new_input_count; i < input_count(); i++) {
- Node::Input* input = GetInputRecordPtr(i);
- input->Update(NULL);
+ const_iterator& operator++() {
+ DCHECK_NOT_NULL(current_);
+ current_ = current_->next;
+ return *this;
}
- set_input_count(new_input_count);
-}
+ const_iterator operator++(int);
-inline void Node::ReplaceInput(int index, Node* new_to) {
- Input* input = GetInputRecordPtr(index);
- input->Update(new_to);
-}
+ private:
+ friend class Node::Uses;
-inline void Node::Input::Update(Node* new_to) {
- Node* old_to = this->to;
- if (new_to == old_to) return; // Nothing to do.
- // Snip out the use from where it used to be
- if (old_to != NULL) {
- old_to->RemoveUse(use);
- }
- to = new_to;
- // And put it into the new node's use list.
- if (new_to != NULL) {
- new_to->AppendUse(use);
- } else {
- use->next = NULL;
- use->prev = NULL;
- }
-}
+ const_iterator() : current_(nullptr) {}
+ explicit const_iterator(Node* node) : current_(node->first_use_) {}
-inline void Node::EnsureAppendableInputs(Zone* zone) {
- if (!has_appendable_inputs()) {
- void* deque_buffer = zone->New(sizeof(InputDeque));
- InputDeque* deque = new (deque_buffer) InputDeque(zone);
- for (int i = 0; i < input_count(); ++i) {
- deque->push_back(inputs_.static_[i]);
- }
- inputs_.appendable_ = deque;
- set_has_appendable_inputs(true);
- }
-}
+ Node::Use* current_;
+};
-inline void Node::AppendInput(Zone* zone, Node* to_append) {
- Use* new_use = new (zone) Use;
- Input new_input;
- new_input.to = to_append;
- new_input.use = new_use;
- if (reserved_input_count() > 0) {
- DCHECK(!has_appendable_inputs());
- set_reserved_input_count(reserved_input_count() - 1);
- inputs_.static_[input_count()] = new_input;
- } else {
- EnsureAppendableInputs(zone);
- inputs_.appendable_->push_back(new_input);
- }
- new_use->input_index = input_count();
- new_use->from = this;
- to_append->AppendUse(new_use);
- set_input_count(input_count() + 1);
-}
-inline void Node::InsertInput(Zone* zone, int index, Node* to_insert) {
- DCHECK(index >= 0 && index < InputCount());
- // TODO(turbofan): Optimize this implementation!
- AppendInput(zone, InputAt(InputCount() - 1));
- for (int i = InputCount() - 1; i > index; --i) {
- ReplaceInput(i, InputAt(i - 1));
- }
- ReplaceInput(index, to_insert);
+Node::Uses::const_iterator Node::Uses::begin() const {
+ return const_iterator(this->node_);
}
-inline void Node::RemoveInput(int index) {
- DCHECK(index >= 0 && index < InputCount());
- // TODO(turbofan): Optimize this implementation!
- for (; index < InputCount() - 1; ++index) {
- ReplaceInput(index, InputAt(index + 1));
- }
- TrimInputCount(InputCount() - 1);
-}
-inline void Node::AppendUse(Use* use) {
- use->next = NULL;
- use->prev = last_use_;
- if (last_use_ == NULL) {
- first_use_ = use;
- } else {
- last_use_->next = use;
- }
- last_use_ = use;
-}
+Node::Uses::const_iterator Node::Uses::end() const { return const_iterator(); }
-inline void Node::RemoveUse(Use* use) {
- if (last_use_ == use) {
- last_use_ = use->prev;
- }
- if (use->prev != NULL) {
- use->prev->next = use->next;
- } else {
- first_use_ = use->next;
- }
- if (use->next != NULL) {
- use->next->prev = use->prev;
- }
-}
-inline bool Node::OwnedBy(Node* owner) const {
- return first_use_ != NULL && first_use_->from == owner &&
- first_use_->next == NULL;
+void Node::ReplaceInput(int index, Node* new_to) {
+ GetInputRecordPtr(index)->Update(new_to);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/opcodes.cc b/deps/v8/src/compiler/opcodes.cc
index 044395c785..1c94c19c78 100644
--- a/deps/v8/src/compiler/opcodes.cc
+++ b/deps/v8/src/compiler/opcodes.cc
@@ -25,7 +25,7 @@ char const* const kMnemonics[] = {
// static
char const* IrOpcode::Mnemonic(Value value) {
- size_t const n = std::max<size_t>(value, arraysize(kMnemonics) - 1);
+ size_t const n = std::min<size_t>(value, arraysize(kMnemonics) - 1);
return kMnemonics[n];
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index d229b6da99..a4f8d3ec16 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -12,9 +12,13 @@
V(Branch) \
V(IfTrue) \
V(IfFalse) \
+ V(Switch) \
+ V(IfValue) \
+ V(IfDefault) \
V(Merge) \
V(Return) \
- V(Terminate) \
+ V(OsrNormalEntry) \
+ V(OsrLoopEntry) \
V(Throw)
#define CONTROL_OP_LIST(V) \
@@ -22,19 +26,20 @@
V(Start) \
V(End)
-// Opcodes for common operators.
-#define LEAF_OP_LIST(V) \
- V(Int32Constant) \
- V(Int64Constant) \
- V(Float32Constant) \
- V(Float64Constant) \
- V(ExternalConstant) \
- V(NumberConstant) \
+// Opcodes for constant operators.
+#define CONSTANT_OP_LIST(V) \
+ V(Int32Constant) \
+ V(Int64Constant) \
+ V(Float32Constant) \
+ V(Float64Constant) \
+ V(ExternalConstant) \
+ V(NumberConstant) \
V(HeapConstant)
#define INNER_OP_LIST(V) \
V(Select) \
V(Phi) \
+ V(EffectSet) \
V(EffectPhi) \
V(ValueEffect) \
V(Finish) \
@@ -42,11 +47,13 @@
V(StateValues) \
V(Call) \
V(Parameter) \
+ V(OsrValue) \
V(Projection)
#define COMMON_OP_LIST(V) \
- LEAF_OP_LIST(V) \
- INNER_OP_LIST(V)
+ CONSTANT_OP_LIST(V) \
+ INNER_OP_LIST(V) \
+ V(Always)
// Opcodes for JavaScript operators.
#define JS_COMPARE_BINOP_LIST(V) \
@@ -145,6 +152,7 @@
V(NumberModulus) \
V(NumberToInt32) \
V(NumberToUint32) \
+ V(PlainPrimitiveToNumber) \
V(ReferenceEqual) \
V(StringEqual) \
V(StringLessThan) \
@@ -271,57 +279,28 @@ class IrOpcode {
// Returns the mnemonic name of an opcode.
static char const* Mnemonic(Value value);
- static bool IsJsOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a range check.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- JS_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+ // Returns true if opcode for common operator.
+ static bool IsCommonOpcode(Value value) {
+ return kDead <= value && value <= kAlways;
}
- static bool IsControlOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a range check.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- CONTROL_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+ // Returns true if opcode for control operator.
+ static bool IsControlOpcode(Value value) {
+ return kDead <= value && value <= kEnd;
}
- static bool IsLeafOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a table lookup.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- LEAF_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+ // Returns true if opcode for JavaScript operator.
+ static bool IsJsOpcode(Value value) {
+ return kJSEqual <= value && value <= kJSDebugger;
}
- static bool IsCommonOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a table lookup or a range check.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- CONTROL_OP_LIST(RETURN_NAME)
- COMMON_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+ // Returns true if opcode for constant operator.
+ static bool IsConstantOpcode(Value value) {
+ return kInt32Constant <= value && value <= kHeapConstant;
+ }
+
+ static bool IsPhiOpcode(Value val) {
+ return val == kPhi || val == kEffectPhi;
}
};
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index abfc5fd99c..53bd16c0af 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -57,21 +57,26 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSBitwiseOr:
case IrOpcode::kJSBitwiseXor:
case IrOpcode::kJSDivide:
- case IrOpcode::kJSLoadNamed:
- case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSModulus:
case IrOpcode::kJSMultiply:
case IrOpcode::kJSShiftLeft:
case IrOpcode::kJSShiftRight:
case IrOpcode::kJSShiftRightLogical:
- case IrOpcode::kJSStoreNamed:
- case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSSubtract:
+ // Context operations
+ case IrOpcode::kJSCreateWithContext:
+
// Conversions
case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToName:
- // Other
+ // Properties
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSLoadProperty:
+ case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSDeleteProperty:
return true;
@@ -95,7 +100,8 @@ bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
- opcode == IrOpcode::kIfFalse;
+ opcode == IrOpcode::kIfFalse || opcode == IrOpcode::kIfValue ||
+ opcode == IrOpcode::kIfDefault;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index c8687f4304..ae10348422 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -10,13 +10,20 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
template <typename N>
-static inline N CheckRange(size_t val) {
- CHECK(val <= std::numeric_limits<N>::max());
+V8_INLINE N CheckRange(size_t val) {
+ CHECK_LE(val, std::numeric_limits<N>::max());
return static_cast<N>(val);
}
+} // namespace
+
+
+// static
+STATIC_CONST_MEMBER_DEFINITION const size_t Operator::kMaxControlOutputCount;
+
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
@@ -29,7 +36,7 @@ Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
control_in_(CheckRange<uint16_t>(control_in)),
value_out_(CheckRange<uint16_t>(value_out)),
effect_out_(CheckRange<uint8_t>(effect_out)),
- control_out_(CheckRange<uint8_t>(control_out)) {}
+ control_out_(CheckRange<uint16_t>(control_out)) {}
std::ostream& operator<<(std::ostream& os, const Operator& op) {
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index fb144ce896..6407499da5 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -44,6 +44,7 @@ class Operator : public ZoneObject {
// create new scheduling dependencies.
kNoThrow = 1 << 6, // Can never generate an exception.
kFoldable = kNoRead | kNoWrite,
+ kKontrol = kFoldable | kNoThrow,
kEliminatable = kNoWrite | kNoThrow,
kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
};
@@ -84,6 +85,9 @@ class Operator : public ZoneObject {
Properties properties() const { return properties_; }
+ // TODO(bmeurer): Use bit fields below?
+ static const size_t kMaxControlOutputCount = (1u << 16) - 1;
+
// TODO(titzer): convert return values here to size_t.
int ValueInputCount() const { return value_in_; }
int EffectInputCount() const { return effect_in_; }
@@ -93,7 +97,7 @@ class Operator : public ZoneObject {
int EffectOutputCount() const { return effect_out_; }
int ControlOutputCount() const { return control_out_; }
- static inline size_t ZeroIfPure(Properties properties) {
+ static size_t ZeroIfPure(Properties properties) {
return (properties & kPure) == kPure ? 0 : 1;
}
@@ -113,7 +117,7 @@ class Operator : public ZoneObject {
uint16_t control_in_;
uint16_t value_out_;
uint8_t effect_out_;
- uint8_t control_out_;
+ uint16_t control_out_;
DISALLOW_COPY_AND_ASSIGN(Operator);
};
@@ -143,7 +147,7 @@ class Operator1 : public Operator {
bool Equals(const Operator* other) const FINAL {
if (opcode() != other->opcode()) return false;
- const Operator1<T>* that = static_cast<const Operator1<T>*>(other);
+ const Operator1<T>* that = reinterpret_cast<const Operator1<T>*>(other);
return this->pred_(this->parameter(), that->parameter());
}
size_t HashCode() const FINAL {
@@ -169,21 +173,23 @@ class Operator1 : public Operator {
// Helper to extract parameters from Operator1<*> operator.
template <typename T>
inline T const& OpParameter(const Operator* op) {
- return static_cast<const Operator1<T>*>(op)->parameter();
+ return reinterpret_cast<const Operator1<T>*>(op)->parameter();
}
// NOTE: We have to be careful to use the right equal/hash functions below, for
// float/double we always use the ones operating on the bit level.
template <>
inline float const& OpParameter(const Operator* op) {
- return static_cast<const Operator1<float, base::bit_equal_to<float>,
- base::bit_hash<float>>*>(op)->parameter();
+ return reinterpret_cast<const Operator1<float, base::bit_equal_to<float>,
+ base::bit_hash<float>>*>(op)
+ ->parameter();
}
template <>
inline double const& OpParameter(const Operator* op) {
- return static_cast<const Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>*>(op)->parameter();
+ return reinterpret_cast<const Operator1<double, base::bit_equal_to<double>,
+ base::bit_hash<double>>*>(op)
+ ->parameter();
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
new file mode 100644
index 0000000000..b7cd7ec93f
--- /dev/null
+++ b/deps/v8/src/compiler/osr.cc
@@ -0,0 +1,286 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/control-reducer.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/osr.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OsrHelper::OsrHelper(CompilationInfo* info)
+ : parameter_count_(info->scope()->num_parameters()),
+ stack_slot_count_(info->scope()->num_stack_slots() +
+ info->osr_expr_stack_height()) {}
+
+
+// Peel outer loops and rewire the graph so that control reduction can
+// produce a properly formed graph.
+static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
+ Zone* tmp_zone, Node* dead,
+ LoopTree* loop_tree, LoopTree::Loop* osr_loop,
+ Node* osr_normal_entry, Node* osr_loop_entry) {
+ const int original_count = graph->NodeCount();
+ AllNodes all(tmp_zone, graph);
+ NodeVector tmp_inputs(tmp_zone);
+ Node* sentinel = graph->NewNode(dead->op());
+
+ // Make a copy of the graph for each outer loop.
+ ZoneVector<NodeVector*> copies(tmp_zone);
+ for (LoopTree::Loop* loop = osr_loop->parent(); loop; loop = loop->parent()) {
+ void* stuff = tmp_zone->New(sizeof(NodeVector));
+ NodeVector* mapping =
+ new (stuff) NodeVector(original_count, sentinel, tmp_zone);
+ copies.push_back(mapping);
+
+ // Prepare the mapping for OSR values and the OSR loop entry.
+ mapping->at(osr_normal_entry->id()) = dead;
+ mapping->at(osr_loop_entry->id()) = dead;
+
+ // The outer loops are dead in this copy.
+ for (LoopTree::Loop* outer = loop->parent(); outer;
+ outer = outer->parent()) {
+ for (Node* node : loop_tree->HeaderNodes(outer)) {
+ mapping->at(node->id()) = dead;
+ }
+ }
+
+ // Copy all nodes.
+ for (size_t i = 0; i < all.live.size(); i++) {
+ Node* orig = all.live[i];
+ Node* copy = mapping->at(orig->id());
+ if (copy != sentinel) {
+ // Mapping already exists.
+ continue;
+ }
+ if (orig->InputCount() == 0 || orig->opcode() == IrOpcode::kParameter ||
+ orig->opcode() == IrOpcode::kOsrValue) {
+ // No need to copy leaf nodes or parameters.
+ mapping->at(orig->id()) = orig;
+ continue;
+ }
+
+ // Copy the node.
+ tmp_inputs.clear();
+ for (Node* input : orig->inputs()) {
+ tmp_inputs.push_back(mapping->at(input->id()));
+ }
+ copy = graph->NewNode(orig->op(), orig->InputCount(), &tmp_inputs[0]);
+ if (NodeProperties::IsTyped(orig)) {
+ NodeProperties::SetBounds(copy, NodeProperties::GetBounds(orig));
+ }
+ mapping->at(orig->id()) = copy;
+ }
+
+ // Fix missing inputs.
+ for (size_t i = 0; i < all.live.size(); i++) {
+ Node* orig = all.live[i];
+ Node* copy = mapping->at(orig->id());
+ for (int j = 0; j < copy->InputCount(); j++) {
+ Node* input = copy->InputAt(j);
+ if (input == sentinel)
+ copy->ReplaceInput(j, mapping->at(orig->InputAt(j)->id()));
+ }
+ }
+
+ // Construct the transfer from the previous graph copies to the new copy.
+ Node* loop_header = loop_tree->HeaderNode(loop);
+ NodeVector* previous =
+ copies.size() > 1 ? copies[copies.size() - 2] : nullptr;
+ const int backedges = loop_header->op()->ControlInputCount() - 1;
+ if (backedges == 1) {
+ // Simple case. Map the incoming edges to the loop to the previous copy.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ if (!all.IsLive(node)) continue; // dead phi hanging off loop.
+ Node* copy = mapping->at(node->id());
+ Node* backedge = node->InputAt(1);
+ if (previous) backedge = previous->at(backedge->id());
+ copy->ReplaceInput(0, backedge);
+ }
+ } else {
+ // Complex case. Multiple backedges. Introduce a merge for incoming edges.
+ tmp_inputs.clear();
+ for (int i = 0; i < backedges; i++) {
+ Node* backedge = loop_header->InputAt(i + 1);
+ if (previous) backedge = previous->at(backedge->id());
+ tmp_inputs.push_back(backedge);
+ }
+ Node* merge =
+ graph->NewNode(common->Merge(backedges), backedges, &tmp_inputs[0]);
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ if (!all.IsLive(node)) continue; // dead phi hanging off loop.
+ Node* copy = mapping->at(node->id());
+ if (node == loop_header) {
+ // The entry to the loop is the merge.
+ copy->ReplaceInput(0, merge);
+ } else {
+ // Merge inputs to the phi at the loop entry.
+ tmp_inputs.clear();
+ for (int i = 0; i < backedges; i++) {
+ Node* backedge = node->InputAt(i + 1);
+ if (previous) backedge = previous->at(backedge->id());
+ tmp_inputs.push_back(backedge);
+ }
+ tmp_inputs.push_back(merge);
+ Node* phi =
+ graph->NewNode(common->ResizeMergeOrPhi(node->op(), backedges),
+ backedges + 1, &tmp_inputs[0]);
+ copy->ReplaceInput(0, phi);
+ }
+ }
+ }
+ }
+
+ // Kill the outer loops in the original graph.
+ for (LoopTree::Loop* outer = osr_loop->parent(); outer;
+ outer = outer->parent()) {
+ loop_tree->HeaderNode(outer)->ReplaceUses(dead);
+ }
+
+ // Merge the ends of the graph copies.
+ Node* end = graph->end();
+ tmp_inputs.clear();
+ for (int i = -1; i < static_cast<int>(copies.size()); i++) {
+ Node* input = end->InputAt(0);
+ if (i >= 0) input = copies[i]->at(input->id());
+ if (input->opcode() == IrOpcode::kMerge) {
+ for (Node* node : input->inputs()) tmp_inputs.push_back(node);
+ } else {
+ tmp_inputs.push_back(input);
+ }
+ }
+ int count = static_cast<int>(tmp_inputs.size());
+ Node* merge = graph->NewNode(common->Merge(count), count, &tmp_inputs[0]);
+ end->ReplaceInput(0, merge);
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after OSR duplication -- " << std::endl;
+ os << AsRPO(*graph);
+ }
+}
+
+
+static void TransferOsrValueTypesFromLoopPhis(Zone* zone, Node* osr_loop_entry,
+ Node* osr_loop) {
+ // Find the index of the osr loop entry into the loop.
+ int index = 0;
+ for (index = 0; index < osr_loop->InputCount(); index++) {
+ if (osr_loop->InputAt(index) == osr_loop_entry) break;
+ }
+ if (index == osr_loop->InputCount()) return;
+
+ for (Node* osr_value : osr_loop_entry->uses()) {
+ if (osr_value->opcode() != IrOpcode::kOsrValue) continue;
+ bool unknown = true;
+ for (Node* phi : osr_value->uses()) {
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+ if (NodeProperties::GetControlInput(phi) != osr_loop) continue;
+ if (phi->InputAt(index) != osr_value) continue;
+ if (NodeProperties::IsTyped(phi)) {
+ // Transfer the type from the phi to the OSR value itself.
+ Bounds phi_bounds = NodeProperties::GetBounds(phi);
+ if (unknown) {
+ NodeProperties::SetBounds(osr_value, phi_bounds);
+ } else {
+ Bounds osr_bounds = NodeProperties::GetBounds(osr_value);
+ NodeProperties::SetBounds(osr_value,
+ Bounds::Both(phi_bounds, osr_bounds, zone));
+ }
+ unknown = false;
+ }
+ }
+ if (unknown) NodeProperties::SetBounds(osr_value, Bounds::Unbounded(zone));
+ }
+}
+
+
+bool OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
+ Zone* tmp_zone) {
+ Graph* graph = jsgraph->graph();
+ Node* osr_normal_entry = nullptr;
+ Node* osr_loop_entry = nullptr;
+ Node* osr_loop = nullptr;
+
+ for (Node* node : graph->start()->uses()) {
+ if (node->opcode() == IrOpcode::kOsrLoopEntry) {
+ osr_loop_entry = node; // found the OSR loop entry
+ } else if (node->opcode() == IrOpcode::kOsrNormalEntry) {
+ osr_normal_entry = node;
+ }
+ }
+
+ if (osr_loop_entry == nullptr) {
+ // No OSR entry found, do nothing.
+ CHECK(osr_normal_entry);
+ return true;
+ }
+
+ for (Node* use : osr_loop_entry->uses()) {
+ if (use->opcode() == IrOpcode::kLoop) {
+ CHECK(!osr_loop); // should be only one OSR loop.
+ osr_loop = use; // found the OSR loop.
+ }
+ }
+
+ CHECK(osr_loop); // Should have found the OSR loop.
+
+ // Transfer the types from loop phis to the OSR values which flow into them.
+ TransferOsrValueTypesFromLoopPhis(graph->zone(), osr_loop_entry, osr_loop);
+
+ // Analyze the graph to determine how deeply nested the OSR loop is.
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
+
+ Node* dead = jsgraph->DeadControl();
+ LoopTree::Loop* loop = loop_tree->ContainingLoop(osr_loop);
+ if (loop->depth() > 0) {
+ PeelOuterLoopsForOsr(graph, common, tmp_zone, dead, loop_tree, loop,
+ osr_normal_entry, osr_loop_entry);
+ }
+
+ // Replace the normal entry with {Dead} and the loop entry with {Start}
+ // and run the control reducer to clean up the graph.
+ osr_normal_entry->ReplaceUses(dead);
+ osr_normal_entry->Kill();
+ osr_loop_entry->ReplaceUses(graph->start());
+ osr_loop_entry->Kill();
+
+ // Normally the control reducer removes loops whose first input is dead,
+ // but we need to avoid that because the osr_loop is reachable through
+ // the second input, so reduce it and its phis manually.
+ osr_loop->ReplaceInput(0, dead);
+ Node* node = ControlReducer::ReduceMerge(jsgraph, common, osr_loop);
+ if (node != osr_loop) osr_loop->ReplaceUses(node);
+
+ // Run the normal control reduction, which naturally trims away the dead
+ // parts of the graph.
+ ControlReducer::ReduceGraph(tmp_zone, jsgraph, common);
+
+ return true;
+}
+
+
+void OsrHelper::SetupFrame(Frame* frame) {
+ // The optimized frame will subsume the unoptimized frame. Do so by reserving
+ // the first spill slots.
+ frame->ReserveSpillSlots(UnoptimizedFrameSlots());
+ // The frame needs to be adjusted by the number of unoptimized frame slots.
+ frame->SetOsrStackSlotCount(static_cast<int>(UnoptimizedFrameSlots()));
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/osr.h b/deps/v8/src/compiler/osr.h
new file mode 100644
index 0000000000..549bb5f96a
--- /dev/null
+++ b/deps/v8/src/compiler/osr.h
@@ -0,0 +1,127 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OSR_H_
+#define V8_COMPILER_OSR_H_
+
+#include "src/zone.h"
+
+// TurboFan structures OSR graphs in a way that separates almost all phases of
+// compilation from OSR implementation details. This is accomplished with
+// special control nodes that are added at graph building time. In particular,
+// the graph is built in such a way that typing still computes the best types
+// and optimizations and lowering work unchanged. All that remains is to
+// deconstruct the OSR artifacts before scheduling and code generation.
+
+// Graphs built for OSR from the AstGraphBuilder are structured as follows:
+// Start
+// +-------------------^^-----+
+// | |
+// OsrNormalEntry OsrLoopEntry <-------------+
+// | | |
+// control flow before loop | A OsrValue
+// | | | |
+// | +------------------------+ | +-------+
+// | | +-------------+ | | +--------+
+// | | | | | | | |
+// ( Loop )<-----------|------------------ ( phi ) |
+// | | |
+// loop body | backedge(s) |
+// | | | |
+// | +--------------+ B <-----+
+// |
+// end
+
+// The control structure expresses the relationship that the loop has a separate
+// entrypoint which corresponds to entering the loop directly from the middle
+// of unoptimized code.
+// Similarly, the values that come in from unoptimized code are represented with
+// {OsrValue} nodes that merge into any phis associated with the OSR loop.
+// In the above diagram, nodes {A} and {B} represent values in the "normal"
+// graph that correspond to the values of those phis before the loop and on any
+// backedges, respectively.
+
+// To deconstruct OSR, we simply replace the uses of the {OsrNormalEntry}
+// control node with {Dead} and {OsrLoopEntry} with start and run the
+// {ControlReducer}. Control reduction propagates the dead control forward,
+// essentially "killing" all the code before the OSR loop. The entrypoint to the
+// loop corresponding to the "normal" entry path will also be removed, as well
+// as the inputs to the loop phis, resulting in the reduced graph:
+
+// Start
+// Dead |^-------------------------+
+// | | |
+// | | |
+// | | |
+// disconnected, dead | A=dead OsrValue
+// | |
+// +------------------+ +------+
+// | +-------------+ | +--------+
+// | | | | | |
+// ( Loop )<-----------|------------------ ( phi ) |
+// | | |
+// loop body | backedge(s) |
+// | | | |
+// | +--------------+ B <-----+
+// |
+// end
+
+// Other than the presences of the OsrValue nodes, this is a normal, schedulable
+// graph. OsrValue nodes are handled specially in the instruction selector to
+// simply load from the unoptimized frame.
+
+// For nested OSR loops, loop peeling must first be applied as many times as
+// necessary in order to bring the OSR loop up to the top level (i.e. to be
+// an outer loop).
+
+namespace v8 {
+namespace internal {
+
+class CompilationInfo;
+
+namespace compiler {
+
+class JSGraph;
+class CommonOperatorBuilder;
+class Frame;
+class Linkage;
+
+// Encapsulates logic relating to OSR compilations as well has handles some
+// details of the frame layout.
+class OsrHelper {
+ public:
+ explicit OsrHelper(CompilationInfo* info);
+ // Only for testing.
+ OsrHelper(size_t parameter_count, size_t stack_slot_count)
+ : parameter_count_(parameter_count),
+ stack_slot_count_(stack_slot_count) {}
+
+ // Deconstructs the artificial {OsrNormalEntry} and rewrites the graph so
+ // that only the path corresponding to {OsrLoopEntry} remains.
+ // Return {false} if the OSR deconstruction failed somehow.
+ bool Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
+ Zone* tmp_zone);
+
+ // Prepares the frame w.r.t. OSR.
+ void SetupFrame(Frame* frame);
+
+ // Returns the number of unoptimized frame slots for this OSR.
+ size_t UnoptimizedFrameSlots() { return stack_slot_count_; }
+
+ // Returns the environment index of the first stack slot.
+ static int FirstStackSlotIndex(int parameter_count) {
+ // n.b. unlike Crankshaft, TurboFan environments do not contain the context.
+ return 1 + parameter_count; // receiver + params
+ }
+
+ private:
+ size_t parameter_count_;
+ size_t stack_slot_count_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_OSR_H_
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index e58c396578..0203a35872 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -43,7 +43,7 @@ void PipelineStatistics::CommonStats::End(
PipelineStatistics::PipelineStatistics(CompilationInfo* info,
ZonePool* zone_pool)
- : isolate_(info->zone()->isolate()),
+ : isolate_(info->isolate()),
outer_zone_(info->zone()),
zone_pool_(zone_pool),
compilation_stats_(isolate_->GetTurboStatistics()),
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index c7432c6ea5..5ec5d085f7 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -8,12 +8,14 @@
#include <sstream>
#include "src/base/platform/elapsed-timer.h"
+#include "src/bootstrapper.h" // TODO(mstarzinger): Only temporary.
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/control-reducer.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-visualizer.h"
@@ -23,11 +25,15 @@
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/load-elimination.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/loop-peeling.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/move-optimizer.h"
+#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/register-allocator-verifier.h"
@@ -49,16 +55,18 @@ namespace compiler {
class PipelineData {
public:
- explicit PipelineData(ZonePool* zone_pool, CompilationInfo* info)
- : isolate_(info->zone()->isolate()),
+ // For main entry point.
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+ PipelineStatistics* pipeline_statistics)
+ : isolate_(info->isolate()),
info_(info),
- outer_zone_(nullptr),
+ outer_zone_(info_->zone()),
zone_pool_(zone_pool),
- pipeline_statistics_(nullptr),
+ pipeline_statistics_(pipeline_statistics),
compilation_failed_(false),
code_(Handle<Code>::null()),
graph_zone_scope_(zone_pool_),
- graph_zone_(nullptr),
+ graph_zone_(graph_zone_scope_.zone()),
graph_(nullptr),
loop_assignment_(nullptr),
machine_(nullptr),
@@ -66,50 +74,81 @@ class PipelineData {
javascript_(nullptr),
jsgraph_(nullptr),
typer_(nullptr),
- context_node_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
- instruction_zone_(nullptr),
+ instruction_zone_(instruction_zone_scope_.zone()),
sequence_(nullptr),
frame_(nullptr),
- register_allocator_(nullptr) {}
-
- ~PipelineData() {
- DeleteInstructionZone();
- DeleteGraphZone();
- }
-
- // For main entry point.
- void Initialize(PipelineStatistics* pipeline_statistics) {
+ register_allocator_(nullptr) {
PhaseScope scope(pipeline_statistics, "init pipeline data");
- outer_zone_ = info()->zone();
- pipeline_statistics_ = pipeline_statistics;
- graph_zone_ = graph_zone_scope_.zone();
- graph_ = new (graph_zone()) Graph(graph_zone());
- source_positions_.Reset(new SourcePositionTable(graph()));
- machine_ = new (graph_zone()) MachineOperatorBuilder(
- graph_zone(), kMachPtr,
+ graph_ = new (graph_zone_) Graph(graph_zone_);
+ source_positions_.Reset(new SourcePositionTable(graph_));
+ machine_ = new (graph_zone_) MachineOperatorBuilder(
+ graph_zone_, kMachPtr,
InstructionSelector::SupportedMachineOperatorFlags());
- common_ = new (graph_zone()) CommonOperatorBuilder(graph_zone());
- javascript_ = new (graph_zone()) JSOperatorBuilder(graph_zone());
- jsgraph_ =
- new (graph_zone()) JSGraph(graph(), common(), javascript(), machine());
- typer_.Reset(new Typer(graph(), info()->context()));
- instruction_zone_ = instruction_zone_scope_.zone();
+ common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
+ javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
+ jsgraph_ = new (graph_zone_)
+ JSGraph(isolate_, graph_, common_, javascript_, machine_);
+ typer_.Reset(new Typer(isolate_, graph_, info_->context()));
}
// For machine graph testing entry point.
- void InitializeTorTesting(Graph* graph, Schedule* schedule) {
- graph_ = graph;
- source_positions_.Reset(new SourcePositionTable(graph));
- schedule_ = schedule;
- instruction_zone_ = instruction_zone_scope_.zone();
- }
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+ Schedule* schedule)
+ : isolate_(info->isolate()),
+ info_(info),
+ outer_zone_(nullptr),
+ zone_pool_(zone_pool),
+ pipeline_statistics_(nullptr),
+ compilation_failed_(false),
+ code_(Handle<Code>::null()),
+ graph_zone_scope_(zone_pool_),
+ graph_zone_(nullptr),
+ graph_(graph),
+ source_positions_(new SourcePositionTable(graph_)),
+ loop_assignment_(nullptr),
+ machine_(nullptr),
+ common_(nullptr),
+ javascript_(nullptr),
+ jsgraph_(nullptr),
+ typer_(nullptr),
+ schedule_(schedule),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(instruction_zone_scope_.zone()),
+ sequence_(nullptr),
+ frame_(nullptr),
+ register_allocator_(nullptr) {}
// For register allocation testing entry point.
- void InitializeTorTesting(InstructionSequence* sequence) {
- instruction_zone_ = sequence->zone();
- sequence_ = sequence;
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+ InstructionSequence* sequence)
+ : isolate_(info->isolate()),
+ info_(info),
+ outer_zone_(nullptr),
+ zone_pool_(zone_pool),
+ pipeline_statistics_(nullptr),
+ compilation_failed_(false),
+ code_(Handle<Code>::null()),
+ graph_zone_scope_(zone_pool_),
+ graph_zone_(nullptr),
+ graph_(nullptr),
+ loop_assignment_(nullptr),
+ machine_(nullptr),
+ common_(nullptr),
+ javascript_(nullptr),
+ jsgraph_(nullptr),
+ typer_(nullptr),
+ schedule_(nullptr),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(sequence->zone()),
+ sequence_(sequence),
+ frame_(nullptr),
+ register_allocator_(nullptr) {}
+
+ ~PipelineData() {
+ DeleteInstructionZone();
+ DeleteGraphZone();
}
Isolate* isolate() const { return isolate_; }
@@ -140,19 +179,13 @@ class PipelineData {
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
- DCHECK_EQ(nullptr, loop_assignment_);
+ DCHECK(!loop_assignment_);
loop_assignment_ = loop_assignment;
}
- Node* context_node() const { return context_node_; }
- void set_context_node(Node* context_node) {
- DCHECK_EQ(nullptr, context_node_);
- context_node_ = context_node;
- }
-
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
- DCHECK_EQ(nullptr, schedule_);
+ DCHECK(!schedule_);
schedule_ = schedule;
}
@@ -175,7 +208,6 @@ class PipelineData {
common_ = nullptr;
javascript_ = nullptr;
jsgraph_ = nullptr;
- context_node_ = nullptr;
schedule_ = nullptr;
}
@@ -189,19 +221,19 @@ class PipelineData {
}
void InitializeInstructionSequence() {
- DCHECK_EQ(nullptr, sequence_);
+ DCHECK(!sequence_);
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(instruction_zone(),
schedule());
- sequence_ = new (instruction_zone())
- InstructionSequence(instruction_zone(), instruction_blocks);
+ sequence_ = new (instruction_zone()) InstructionSequence(
+ info()->isolate(), instruction_zone(), instruction_blocks);
}
void InitializeRegisterAllocator(Zone* local_zone,
const RegisterConfiguration* config,
const char* debug_name) {
- DCHECK_EQ(nullptr, register_allocator_);
- DCHECK_EQ(nullptr, frame_);
+ DCHECK(!register_allocator_);
+ DCHECK(!frame_);
frame_ = new (instruction_zone()) Frame();
register_allocator_ = new (instruction_zone())
RegisterAllocator(config, local_zone, frame(), sequence(), debug_name);
@@ -230,7 +262,6 @@ class PipelineData {
JSGraph* jsgraph_;
// TODO(dcarney): make this into a ZoneObject.
SmartPointer<Typer> typer_;
- Node* context_node_;
Schedule* schedule_;
// All objects in the following group of fields are allocated in
@@ -246,15 +277,6 @@ class PipelineData {
};
-static inline bool VerifyGraphs() {
-#ifdef DEBUG
- return true;
-#else
- return FLAG_turbo_verify;
-#endif
-}
-
-
struct TurboCfgFile : public std::ofstream {
explicit TurboCfgFile(Isolate* isolate)
: std::ofstream(isolate->GetTurboCfgFileName().c_str(),
@@ -294,12 +316,12 @@ class AstGraphBuilderWithPositions : public AstGraphBuilder {
LoopAssignmentAnalysis* loop_assignment,
SourcePositionTable* source_positions)
: AstGraphBuilder(local_zone, info, jsgraph, loop_assignment),
- source_positions_(source_positions) {}
+ source_positions_(source_positions),
+ start_position_(info->shared_info()->start_position()) {}
- bool CreateGraph() {
- SourcePositionTable::Scope pos(source_positions_,
- SourcePosition::Unknown());
- return AstGraphBuilder::CreateGraph();
+ bool CreateGraph(bool constant_context) {
+ SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
+ return AstGraphBuilder::CreateGraph(constant_context);
}
#define DEF_VISIT(type) \
@@ -311,13 +333,47 @@ class AstGraphBuilderWithPositions : public AstGraphBuilder {
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
- Node* GetFunctionContext() { return AstGraphBuilder::GetFunctionContext(); }
-
private:
SourcePositionTable* source_positions_;
+ SourcePosition start_position_;
+};
+
+
+namespace {
+
+class SourcePositionWrapper : public Reducer {
+ public:
+ SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
+ : reducer_(reducer), table_(table) {}
+ virtual ~SourcePositionWrapper() {}
+
+ virtual Reduction Reduce(Node* node) {
+ SourcePosition pos = table_->GetSourcePosition(node);
+ SourcePositionTable::Scope position(table_, pos);
+ return reducer_->Reduce(node);
+ }
+
+ private:
+ Reducer* reducer_;
+ SourcePositionTable* table_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
};
+static void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
+ Reducer* reducer) {
+ if (FLAG_turbo_source_positions) {
+ void* buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
+ SourcePositionWrapper* wrapper =
+ new (buffer) SourcePositionWrapper(reducer, data->source_positions());
+ graph_reducer->AddReducer(wrapper);
+ } else {
+ graph_reducer->AddReducer(reducer);
+ }
+}
+} // namespace
+
class PipelineRunScope {
public:
PipelineRunScope(PipelineData* data, const char* phase_name)
@@ -364,13 +420,11 @@ struct LoopAssignmentAnalysisPhase {
struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
- void Run(PipelineData* data, Zone* temp_zone) {
+ void Run(PipelineData* data, Zone* temp_zone, bool constant_context) {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
data->source_positions());
- if (graph_builder.CreateGraph()) {
- data->set_context_node(graph_builder.GetFunctionContext());
- } else {
+ if (!graph_builder.CreateGraph(constant_context)) {
data->set_compilation_failed();
}
}
@@ -383,10 +437,9 @@ struct ContextSpecializerPhase {
void Run(PipelineData* data, Zone* temp_zone) {
SourcePositionTable::Scope pos(data->source_positions(),
SourcePosition::Unknown());
- JSContextSpecializer spec(data->info(), data->jsgraph(),
- data->context_node());
+ JSContextSpecializer spec(data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&spec);
+ AddReducer(data, &graph_reducer, &spec);
graph_reducer.ReduceGraph();
}
};
@@ -399,7 +452,9 @@ struct InliningPhase {
SourcePositionTable::Scope pos(data->source_positions(),
SourcePosition::Unknown());
JSInliner inliner(temp_zone, data->info(), data->jsgraph());
- inliner.Inline();
+ GraphReducer graph_reducer(data->graph(), temp_zone);
+ AddReducer(data, &graph_reducer, &inliner);
+ graph_reducer.ReduceGraph();
}
};
@@ -411,6 +466,20 @@ struct TyperPhase {
};
+struct OsrDeconstructionPhase {
+ static const char* phase_name() { return "OSR deconstruction"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ SourcePositionTable::Scope pos(data->source_positions(),
+ SourcePosition::Unknown());
+ OsrHelper osr_helper(data->info());
+ bool success =
+ osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
+ if (!success) data->info()->RetryOptimization(kOsrCompileFailed);
+ }
+};
+
+
struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
@@ -421,15 +490,17 @@ struct TypedLoweringPhase {
LoadElimination load_elimination;
JSBuiltinReducer builtin_reducer(data->jsgraph());
JSTypedLowering typed_lowering(data->jsgraph(), temp_zone);
+ JSIntrinsicLowering intrinsic_lowering(data->jsgraph());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
CommonOperatorReducer common_reducer;
GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&vn_reducer);
- graph_reducer.AddReducer(&builtin_reducer);
- graph_reducer.AddReducer(&typed_lowering);
- graph_reducer.AddReducer(&load_elimination);
- graph_reducer.AddReducer(&simple_reducer);
- graph_reducer.AddReducer(&common_reducer);
+ AddReducer(data, &graph_reducer, &vn_reducer);
+ AddReducer(data, &graph_reducer, &builtin_reducer);
+ AddReducer(data, &graph_reducer, &typed_lowering);
+ AddReducer(data, &graph_reducer, &intrinsic_lowering);
+ AddReducer(data, &graph_reducer, &load_elimination);
+ AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
};
@@ -441,40 +512,50 @@ struct SimplifiedLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
SourcePositionTable::Scope pos(data->source_positions(),
SourcePosition::Unknown());
- SimplifiedLowering lowering(data->jsgraph(), temp_zone);
+ SimplifiedLowering lowering(data->jsgraph(), temp_zone,
+ data->source_positions());
lowering.LowerAllNodes();
ValueNumberingReducer vn_reducer(temp_zone);
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer;
GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&vn_reducer);
- graph_reducer.AddReducer(&simple_reducer);
- graph_reducer.AddReducer(&machine_reducer);
- graph_reducer.AddReducer(&common_reducer);
+ AddReducer(data, &graph_reducer, &vn_reducer);
+ AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &machine_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
};
+struct ControlFlowOptimizationPhase {
+ static const char* phase_name() { return "control flow optimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ ControlFlowOptimizer optimizer(data->jsgraph(), temp_zone);
+ optimizer.Optimize();
+ }
+};
+
+
struct ChangeLoweringPhase {
static const char* phase_name() { return "change lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SourcePositionTable::Scope pos(data->source_positions(),
SourcePosition::Unknown());
- Linkage linkage(data->graph_zone(), data->info());
ValueNumberingReducer vn_reducer(temp_zone);
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- ChangeLowering lowering(data->jsgraph(), &linkage);
+ ChangeLowering lowering(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer;
GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&vn_reducer);
- graph_reducer.AddReducer(&simple_reducer);
- graph_reducer.AddReducer(&lowering);
- graph_reducer.AddReducer(&machine_reducer);
- graph_reducer.AddReducer(&common_reducer);
+ AddReducer(data, &graph_reducer, &vn_reducer);
+ AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &lowering);
+ AddReducer(data, &graph_reducer, &machine_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
};
@@ -499,17 +580,35 @@ struct LateControlReductionPhase : ControlReductionPhase {
};
+struct StressLoopPeelingPhase {
+ static const char* phase_name() { return "stress loop peeling"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ SourcePositionTable::Scope pos(data->source_positions(),
+ SourcePosition::Unknown());
+ // Peel the first outer loop for testing.
+ // TODO(titzer): peel all loops? the N'th loop? Innermost loops?
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
+ if (loop_tree != NULL && loop_tree->outer_loops().size() > 0) {
+ LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
+ loop_tree->outer_loops()[0], temp_zone);
+ }
+ }
+};
+
+
struct GenericLoweringPhase {
static const char* phase_name() { return "generic lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SourcePositionTable::Scope pos(data->source_positions(),
SourcePosition::Unknown());
- JSGenericLowering generic(data->info(), data->jsgraph());
+ JSGenericLowering generic(data->info()->is_typing_enabled(),
+ data->jsgraph());
SelectLowering select(data->jsgraph()->graph(), data->jsgraph()->common());
GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&generic);
- graph_reducer.AddReducer(&select);
+ AddReducer(data, &graph_reducer, &generic);
+ AddReducer(data, &graph_reducer, &select);
graph_reducer.ReduceGraph();
}
};
@@ -519,9 +618,11 @@ struct ComputeSchedulePhase {
static const char* phase_name() { return "scheduling"; }
void Run(PipelineData* data, Zone* temp_zone) {
- Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph());
- TraceSchedule(schedule);
- if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ temp_zone, data->graph(), data->info()->is_splitting_enabled()
+ ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags);
+ if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
data->set_schedule(schedule);
}
};
@@ -531,7 +632,7 @@ struct InstructionSelectionPhase {
static const char* phase_name() { return "select instructions"; }
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- InstructionSelector selector(temp_zone, data->graph(), linkage,
+ InstructionSelector selector(temp_zone, data->graph()->NodeCount(), linkage,
data->sequence(), data->schedule(),
data->source_positions());
selector.SelectInstructions();
@@ -584,11 +685,11 @@ struct AllocateDoubleRegistersPhase {
};
-struct ReuseSpillSlotsPhase {
- static const char* phase_name() { return "reuse spill slots"; }
+struct AssignSpillSlotsPhase {
+ static const char* phase_name() { return "assign spill slots"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->ReuseSpillSlots();
+ data->register_allocator()->AssignSpillSlots();
}
};
@@ -668,27 +769,9 @@ struct PrintGraphPhase {
void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
CompilationInfo* info = data->info();
Graph* graph = data->graph();
- char buffer[256];
- Vector<char> filename(buffer, sizeof(buffer));
- SmartArrayPointer<char> functionname;
- if (!info->shared_info().is_null()) {
- functionname = info->shared_info()->DebugName()->ToCString();
- if (strlen(functionname.get()) > 0) {
- SNPrintF(filename, "turbo-%s-%s", functionname.get(), phase);
- } else {
- SNPrintF(filename, "turbo-%p-%s", static_cast<void*>(info), phase);
- }
- } else {
- SNPrintF(filename, "turbo-none-%s", phase);
- }
- std::replace(filename.start(), filename.start() + filename.length(), ' ',
- '_');
{ // Print dot.
- char dot_buffer[256];
- Vector<char> dot_filename(dot_buffer, sizeof(dot_buffer));
- SNPrintF(dot_filename, "%s.dot", filename.start());
- FILE* dot_file = base::OS::FOpen(dot_filename.start(), "w+");
+ FILE* dot_file = OpenVisualizerLogFile(info, phase, "dot", "w+");
if (dot_file == nullptr) return;
OFStream dot_of(dot_file);
dot_of << AsDOT(*graph);
@@ -696,24 +779,19 @@ struct PrintGraphPhase {
}
{ // Print JSON.
- char json_buffer[256];
- Vector<char> json_filename(json_buffer, sizeof(json_buffer));
- SNPrintF(json_filename, "%s.json", filename.start());
- FILE* json_file = base::OS::FOpen(json_filename.start(), "w+");
+ FILE* json_file = OpenVisualizerLogFile(info, NULL, "json", "a+");
if (json_file == nullptr) return;
OFStream json_of(json_file);
- json_of << AsJSON(*graph);
+ json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
+ << AsJSON(*graph, data->source_positions()) << "},\n";
fclose(json_file);
}
- OFStream os(stdout);
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsRPO(*graph);
}
-
- os << "-- " << phase << " graph printed to file " << filename.start()
- << std::endl;
}
};
@@ -740,28 +818,37 @@ void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
if (FLAG_trace_turbo) {
Run<PrintGraphPhase>(phase);
}
- if (VerifyGraphs()) {
+ if (FLAG_turbo_verify) {
Run<VerifyGraphPhase>(untyped);
}
}
Handle<Code> Pipeline::GenerateCode() {
- // This list must be kept in sync with DONT_TURBOFAN_NODE in ast.cc.
- if (info()->function()->dont_optimize_reason() == kTryCatchStatement ||
- info()->function()->dont_optimize_reason() == kTryFinallyStatement ||
- // TODO(turbofan): Make ES6 for-of work and remove this bailout.
- info()->function()->dont_optimize_reason() == kForOfStatement ||
- // TODO(turbofan): Make super work and remove this bailout.
- info()->function()->dont_optimize_reason() == kSuperReference ||
- // TODO(turbofan): Make class literals work and remove this bailout.
- info()->function()->dont_optimize_reason() == kClassLiteral ||
- // TODO(turbofan): Make OSR work and remove this bailout.
- info()->is_osr()) {
+ if (info()->is_osr() && !FLAG_turbo_osr) {
+ // TODO(turbofan): remove this flag and always handle OSR
+ info()->RetryOptimization(kOsrCompileFailed);
return Handle<Code>::null();
}
- ZonePool zone_pool(isolate());
+ // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
+ // the correct solution is to restore the context register after invoking
+ // builtins from full-codegen.
+ Handle<SharedFunctionInfo> shared = info()->shared_info();
+ if (isolate()->bootstrapper()->IsActive() ||
+ shared->disable_optimization_reason() ==
+ kBuiltinFunctionCannotBeOptimized) {
+ shared->DisableOptimization(kBuiltinFunctionCannotBeOptimized);
+ return Handle<Code>::null();
+ }
+
+ // TODO(dslomov): support turbo optimization of subclass constructors.
+ if (IsSubclassConstructor(shared->kind())) {
+ shared->DisableOptimization(kSuperReference);
+ return Handle<Code>::null();
+ }
+
+ ZonePool zone_pool;
SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
@@ -769,9 +856,34 @@ Handle<Code> Pipeline::GenerateCode() {
pipeline_statistics->BeginPhaseKind("initializing");
}
- PipelineData data(&zone_pool, info());
+ if (FLAG_trace_turbo) {
+ FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "w+");
+ if (json_file != nullptr) {
+ OFStream json_of(json_file);
+ Handle<Script> script = info()->script();
+ FunctionLiteral* function = info()->function();
+ SmartArrayPointer<char> function_name =
+ info()->shared_info()->DebugName()->ToCString();
+ int pos = info()->shared_info()->start_position();
+ json_of << "{\"function\":\"" << function_name.get()
+ << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ DisallowHeapAllocation no_allocation;
+ int start = function->start_position();
+ int len = function->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start,
+ len);
+ for (const auto& c : source) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+ }
+ json_of << "\",\n\"phases\":[";
+ fclose(json_file);
+ }
+ }
+
+ PipelineData data(&zone_pool, info(), pipeline_statistics.get());
this->data_ = &data;
- data.Initialize(pipeline_statistics.get());
BeginPhaseKind("graph creation");
@@ -790,7 +902,7 @@ Handle<Code> Pipeline::GenerateCode() {
Run<LoopAssignmentAnalysisPhase>();
}
- Run<GraphBuilderPhase>();
+ Run<GraphBuilderPhase>(info()->is_context_specializing());
if (data.compilation_failed()) return Handle<Code>::null();
RunPrintAndVerify("Initial untyped", true);
@@ -829,10 +941,27 @@ Handle<Code> Pipeline::GenerateCode() {
Run<TypedLoweringPhase>();
RunPrintAndVerify("Lowered typed");
+ if (FLAG_turbo_stress_loop_peeling) {
+ Run<StressLoopPeelingPhase>();
+ RunPrintAndVerify("Loop peeled", true);
+ }
+
+ if (info()->is_osr()) {
+ Run<OsrDeconstructionPhase>();
+ if (info()->bailout_reason() != kNoReason) return Handle<Code>::null();
+ RunPrintAndVerify("OSR deconstruction");
+ }
+
// Lower simplified operators and insert changes.
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Lowered simplified");
+ // Optimize control flow.
+ if (FLAG_turbo_switch) {
+ Run<ControlFlowOptimizationPhase>();
+ RunPrintAndVerify("Control flow optimized");
+ }
+
// Lower changes that have been inserted before.
Run<ChangeLoweringPhase>();
// // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
@@ -840,6 +969,12 @@ Handle<Code> Pipeline::GenerateCode() {
Run<LateControlReductionPhase>();
RunPrintAndVerify("Late Control reduced");
+ } else {
+ if (info()->is_osr()) {
+ Run<OsrDeconstructionPhase>();
+ if (info()->bailout_reason() != kNoReason) return Handle<Code>::null();
+ RunPrintAndVerify("OSR deconstruction");
+ }
}
// Lower any remaining generic JSOperators.
@@ -851,28 +986,8 @@ Handle<Code> Pipeline::GenerateCode() {
data.source_positions()->RemoveDecorator();
- // Compute a schedule.
- Run<ComputeSchedulePhase>();
-
- {
- // Generate optimized code.
- Linkage linkage(data.instruction_zone(), info());
- GenerateCode(&linkage);
- }
- Handle<Code> code = data.code();
- info()->SetCode(code);
-
- // Print optimized code.
- v8::internal::CodeGenerator::PrintCode(code, info());
-
- if (FLAG_trace_turbo) {
- OFStream os(stdout);
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << GetDebugName(info()).get()
- << " using Turbofan" << std::endl;
- }
-
- return code;
+ return ScheduleAndGenerateCode(
+ Linkage::ComputeIncoming(data.instruction_zone(), info()));
}
@@ -885,10 +1000,12 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
}
-Handle<Code> Pipeline::GenerateCodeForTesting(CallDescriptor* call_descriptor,
+Handle<Code> Pipeline::GenerateCodeForTesting(Isolate* isolate,
+ CallDescriptor* call_descriptor,
Graph* graph,
Schedule* schedule) {
- CompilationInfo info(graph->zone()->isolate(), graph->zone());
+ FakeStubForTesting stub(isolate);
+ CompilationInfo info(&stub, isolate, graph->zone());
return GenerateCodeForTesting(&info, call_descriptor, graph, schedule);
}
@@ -897,42 +1014,27 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
Graph* graph,
Schedule* schedule) {
- CHECK(SupportedBackend());
- ZonePool zone_pool(info->isolate());
+ // Construct a pipeline for scheduling and code generation.
+ ZonePool zone_pool;
+ PipelineData data(&zone_pool, info, graph, schedule);
Pipeline pipeline(info);
- PipelineData data(&zone_pool, info);
pipeline.data_ = &data;
- data.InitializeTorTesting(graph, schedule);
- if (schedule == NULL) {
+ if (data.schedule() == nullptr) {
// TODO(rossberg): Should this really be untyped?
pipeline.RunPrintAndVerify("Machine", true);
- pipeline.Run<ComputeSchedulePhase>();
- } else {
- TraceSchedule(schedule);
}
- Linkage linkage(info->zone(), call_descriptor);
- pipeline.GenerateCode(&linkage);
- Handle<Code> code = data.code();
-
-#if ENABLE_DISASSEMBLER
- if (!code.is_null() && FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble("test code", os);
- }
-#endif
- return code;
+ return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
- CompilationInfo info(sequence->zone()->isolate(), sequence->zone());
- ZonePool zone_pool(sequence->zone()->isolate());
- PipelineData data(&zone_pool, &info);
- data.InitializeTorTesting(sequence);
+ FakeStubForTesting stub(sequence->isolate());
+ CompilationInfo info(&stub, sequence->isolate(), sequence->zone());
+ ZonePool zone_pool;
+ PipelineData data(&zone_pool, &info, sequence);
Pipeline pipeline(&info);
pipeline.data_ = &data;
pipeline.AllocateRegisters(config, run_verifier);
@@ -940,14 +1042,16 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
}
-void Pipeline::GenerateCode(Linkage* linkage) {
+Handle<Code> Pipeline::ScheduleAndGenerateCode(
+ CallDescriptor* call_descriptor) {
PipelineData* data = this->data_;
- DCHECK_NOT_NULL(linkage);
DCHECK_NOT_NULL(data->graph());
- DCHECK_NOT_NULL(data->schedule());
CHECK(SupportedBackend());
+ if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
+ TraceSchedule(data->schedule());
+
BasicBlockProfiler::Data* profiler_data = NULL;
if (FLAG_turbo_profiling) {
profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
@@ -957,7 +1061,8 @@ void Pipeline::GenerateCode(Linkage* linkage) {
data->InitializeInstructionSequence();
// Select and schedule instructions covering the scheduled graph.
- Run<InstructionSelectionPhase>(linkage);
+ Linkage linkage(call_descriptor);
+ Run<InstructionSelectionPhase>(&linkage);
if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
@@ -969,15 +1074,12 @@ void Pipeline::GenerateCode(Linkage* linkage) {
BeginPhaseKind("register allocation");
- bool run_verifier = false;
-#ifdef DEBUG
- run_verifier = true;
-#endif
+ bool run_verifier = FLAG_turbo_verify_allocation;
// Allocate registers.
AllocateRegisters(RegisterConfiguration::ArchDefault(), run_verifier);
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
- return;
+ return Handle<Code>();
}
BeginPhaseKind("code generation");
@@ -988,15 +1090,44 @@ void Pipeline::GenerateCode(Linkage* linkage) {
}
// Generate final machine code.
- Run<GenerateCodePhase>(linkage);
+ Run<GenerateCodePhase>(&linkage);
+ Handle<Code> code = data->code();
if (profiler_data != NULL) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
- data->code()->Disassemble(NULL, os);
+ code->Disassemble(NULL, os);
profiler_data->SetCode(&os);
#endif
}
+
+ info()->SetCode(code);
+ v8::internal::CodeGenerator::PrintCode(code, info());
+
+ if (FLAG_trace_turbo) {
+ FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "a+");
+ if (json_file != nullptr) {
+ OFStream json_of(json_file);
+ json_of
+ << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+#if ENABLE_DISASSEMBLER
+ std::stringstream disassembly_stream;
+ code->Disassemble(NULL, disassembly_stream);
+ std::string disassembly_string(disassembly_stream.str());
+ for (const auto& c : disassembly_string) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n]}";
+ fclose(json_file);
+ }
+ OFStream os(stdout);
+ os << "---------------------------------------------------\n"
+ << "Finished compiling method " << GetDebugName(info()).get()
+ << " using Turbofan" << std::endl;
+ }
+
+ return code;
}
@@ -1004,17 +1135,11 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
bool run_verifier) {
PipelineData* data = this->data_;
- int node_count = data->sequence()->VirtualRegisterCount();
- if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
- data->set_compilation_failed();
- return;
- }
-
// Don't track usage for this zone in compiler stats.
SmartPointer<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.Reset(new Zone(info()->isolate()));
+ verifier_zone.Reset(new Zone());
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
@@ -1027,6 +1152,10 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
ZonePool::Scope zone_scope(data->zone_pool());
data->InitializeRegisterAllocator(zone_scope.zone(), config,
debug_name.get());
+ if (info()->is_osr()) {
+ OsrHelper osr_helper(info());
+ osr_helper.SetupFrame(data->frame());
+ }
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
@@ -1041,18 +1170,9 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
CHECK(!data->register_allocator()->ExistsUseWithoutDefinition());
}
Run<AllocateGeneralRegistersPhase>();
- if (!data->register_allocator()->AllocationOk()) {
- data->set_compilation_failed();
- return;
- }
Run<AllocateDoubleRegistersPhase>();
- if (!data->register_allocator()->AllocationOk()) {
- data->set_compilation_failed();
- return;
- }
- if (FLAG_turbo_reuse_spill_slots) {
- Run<ReuseSpillSlotsPhase>();
- }
+ Run<AssignSpillSlotsPhase>();
+
Run<CommitAssignmentPhase>();
Run<PopulatePointerMapsPhase>();
Run<ConnectRangesPhase>();
@@ -1079,16 +1199,6 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
}
}
-
-void Pipeline::SetUp() {
- InstructionOperand::SetUpCaches();
-}
-
-
-void Pipeline::TearDown() {
- InstructionOperand::TearDownCaches();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 73053dc18e..240ff69f59 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -5,18 +5,14 @@
#ifndef V8_COMPILER_PIPELINE_H_
#define V8_COMPILER_PIPELINE_H_
-#include "src/v8.h"
-
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
#include "src/compiler.h"
-// Note: TODO(turbofan) implies a performance improvement opportunity,
-// and TODO(name) implies an incomplete implementation
-
namespace v8 {
namespace internal {
namespace compiler {
-// Clients of this interface shouldn't depend on lots of compiler internals.
class CallDescriptor;
class Graph;
class InstructionSequence;
@@ -40,7 +36,8 @@ class Pipeline {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(CallDescriptor* call_descriptor,
+ static Handle<Code> GenerateCodeForTesting(Isolate* isolate,
+ CallDescriptor* call_descriptor,
Graph* graph,
Schedule* schedule = nullptr);
@@ -52,9 +49,6 @@ class Pipeline {
static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
- static void SetUp();
- static void TearDown();
-
private:
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
@@ -74,7 +68,7 @@ class Pipeline {
void BeginPhaseKind(const char* phase_kind);
void RunPrintAndVerify(const char* phase, bool untyped = false);
- void GenerateCode(Linkage* linkage);
+ Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
bool run_verifier);
};
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
new file mode 100644
index 0000000000..467d035cc4
--- /dev/null
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -0,0 +1,1363 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/ppc/macro-assembler-ppc.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r11
+
+
+// Adds PPC-specific methods to convert InstructionOperands.
+class PPCOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+ PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ RCBit OutputRCBit() const {
+ switch (instr_->flags_mode()) {
+ case kFlags_branch:
+ case kFlags_set:
+ return SetRC;
+ case kFlags_none:
+ return LeaveRC;
+ }
+ UNREACHABLE();
+ return LeaveRC;
+ }
+
+ bool CompareLogical() const {
+ switch (instr_->flags_condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ Operand InputImmediate(int index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kInt64:
+#if V8_TARGET_ARCH_PPC64
+ return Operand(constant.ToInt64());
+#endif
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ case Constant::kRpoNumber:
+ break;
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, int* first_index) {
+ const int index = *first_index;
+ *mode = AddressingModeField::decode(instr_->opcode());
+ switch (*mode) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return MemOperand(r0);
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, int first_index = 0) {
+ return MemoryOperand(mode, &first_index);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK(op != NULL);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
+// Out-of-line stub that loads a float32 quiet NaN into |result|. Jumped to
+// by ASSEMBLE_CHECKED_LOAD_FLOAT when the index fails the bounds check.
+class OutOfLineLoadNAN32 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+// Same as above for float64 checked loads: materializes a double quiet NaN.
+class OutOfLineLoadNAN64 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+// Out-of-line stub that zeroes |result|. Jumped to by
+// ASSEMBLE_CHECKED_LOAD_INTEGER when the index fails the bounds check.
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadZero(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ li(result_, Operand::Zero()); }
+
+ private:
+ Register const result_;
+};
+
+
+// Maps a platform-independent FlagsCondition onto the PPC condition code
+// used when reading CR0 after the instruction emitted for the comparison.
+// Note that signed and unsigned variants map to the same condition code;
+// the distinction is made earlier by emitting cmp vs. cmpl (see
+// ASSEMBLE_COMPARE).
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ case kUnsignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ case kUnsignedGreaterThan:
+ return gt;
+ case kOverflow:
+// On PPC64, 32-bit overflow is detected via TestIfInt32 (see
+// ASSEMBLE_ADD_WITH_OVERFLOW): CR0 compares equal iff the 64-bit result
+// fits in 32 bits, so "overflow" reads as ne / "no overflow" as eq.
+// On 32-bit, the *AndCheckForOverflow helpers are assumed to leave a
+// negative value behind on overflow, hence lt / ge — TODO confirm against
+// the macro-assembler implementation.
+#if V8_TARGET_ARCH_PPC64
+ return ne;
+#else
+ return lt;
+#endif
+ case kNotOverflow:
+#if V8_TARGET_ARCH_PPC64
+ return eq;
+#else
+ return ge;
+#endif
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ // Unordered float results are handled explicitly via bunordered in
+ // AssembleArchBranch/AssembleArchBoolean, never through this mapping.
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+} // namespace
+
+// Double-precision unary op; forwards the instruction's RC bit so the
+// condition register is (or is not) updated as the selector requested.
+#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.OutputRCBit()); \
+ } while (0)
+
+
+// Double-precision binary op, RC bit forwarded as above.
+#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1), i.OutputRCBit()); \
+ } while (0)
+
+
+// Integer binary op: picks the register-register or register-immediate
+// mnemonic depending on how input 1 was allocated.
+#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1)); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1)); \
+ } \
+ } while (0)
+
+
+// As ASSEMBLE_BINOP, but also forwards the RC bit.
+#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), i.OutputRCBit()); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1), i.OutputRCBit()); \
+ } \
+ } while (0)
+
+
+// As ASSEMBLE_BINOP_RC, but the immediate form takes a raw int32 (shift
+// amounts) rather than an Operand.
+#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), i.OutputRCBit()); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1), i.OutputRCBit()); \
+ } \
+ } while (0)
+
+
+// 32-bit add with overflow detection. On PPC64 the add is done in 64 bits
+// and TestIfInt32 sets CR0 eq iff the result fits in an int32 (i.e. no
+// overflow); FlagsConditionToCondition reads kOverflow as ne accordingly.
+// On 32-bit targets the macro-assembler helper performs the check.
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_ADD_WITH_OVERFLOW() \
+ do { \
+ ASSEMBLE_BINOP(add, addi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+#endif
+
+
+// 32-bit subtract with overflow detection; mirrors the add case. Note the
+// 32-bit immediate form deliberately uses AddAndCheckForOverflow with the
+// negated immediate (sub imm == add -imm).
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_SUB_WITH_OVERFLOW() \
+ do { \
+ ASSEMBLE_BINOP(sub, subi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+#else
+#define ASSEMBLE_SUB_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ -i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+#endif
+
+
+// Integer compare into cr0: logical (unsigned) vs. arithmetic (signed) is
+// chosen by the instruction; the immediate forms are derived by pasting a
+// trailing 'i' onto the given mnemonics (cmpw -> cmpwi etc.).
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
+ do { \
+ const CRegister cr = cr0; \
+ if (HasRegisterInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr); \
+ } \
+ } else { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
+ } else { \
+ __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
+ } \
+ } \
+ DCHECK_EQ(SetRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Floating-point compare into cr0.
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
+ do { \
+ const CRegister cr = cr0; \
+ __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
+ DCHECK_EQ(SetRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Integer modulo via divide / multiply / subtract (no hardware remainder).
+#define ASSEMBLE_MODULO(div_instr, mul_instr) \
+ do { \
+ const Register scratch = kScratchReg; \
+ __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1)); \
+ __ mul_instr(scratch, scratch, i.InputRegister(1)); \
+ __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
+ i.OutputRCBit()); \
+ } while (0)
+
+
+// Double modulo via a call to the C runtime function mod_two_doubles.
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
+ 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Float load: immediate-offset form (kMode_MRI) vs. indexed form otherwise.
+#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Integer load; same addressing-mode dispatch as ASSEMBLE_LOAD_FLOAT.
+#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Float store; the value to store follows the address inputs, so its input
+// index is returned by MemoryOperand.
+#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx) \
+ do { \
+ int index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DoubleRegister value = i.InputDoubleRegister(index); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Integer store; same shape as ASSEMBLE_STORE_FLOAT.
+#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ int index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Bounds-checked float load: compares the (sign-extended) offset against
+// input 2 and branches to an out-of-line NaN loader when out of bounds.
+// NOTE(review): the kMode_MRI branch below is unreachable given the
+// DCHECK_EQ(kMode_MRR, mode) above; it only survives in release builds
+// where DCHECKs compile away.
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
+ __ bge(ool->entry()); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Bounds-checked integer load: out-of-bounds yields zero via
+// OutOfLineLoadZero. Same dead-MRI-branch note as above applies.
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ bge(ool->entry()); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Bounds-checked float store: silently skips the store when out of bounds.
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ DoubleRegister value = i.InputDoubleRegister(3); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ __ bind(&done); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Bounds-checked integer store; same shape as the float variant.
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, 0); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ Register value = i.InputRegister(3); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ __ bind(&done); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Store with generational write barrier. NOTE(review): clobbers the index
+// input register (it is overwritten with object + index before the store).
+#define ASSEMBLE_STORE_WRITE_BARRIER() \
+ do { \
+ Register object = i.InputRegister(0); \
+ Register index = i.InputRegister(1); \
+ Register value = i.InputRegister(2); \
+ __ add(index, object, index); \
+ __ StoreP(value, MemOperand(index)); \
+ SaveFPRegsMode mode = \
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; \
+ LinkRegisterStatus lr_status = kLRHasNotBeenSaved; \
+ __ RecordWrite(object, index, value, lr_status, mode); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+// Dispatches on the architecture opcode; common (kArch*) opcodes handle
+// calls, jumps, returns and conversions, while kPPC_* opcodes map mostly
+// one-to-one onto PPC instructions via the ASSEMBLE_* macros above.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
+
+ switch (opcode) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (HasRegisterInput(instr, 0)) {
+ // Call through ip at the code object's instruction start.
+ __ addi(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+ } else {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ }
+ AddSafepointAndDeopt(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ LoadP(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+ AddSafepointAndDeopt(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchRet:
+ AssembleReturn();
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchStackPointer:
+ __ mr(i.OutputRegister(), sp);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchTruncateDoubleToI:
+ // TODO(mbrandy): move slow call to stub out of line.
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_And32:
+ case kPPC_And64:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ // andi. always records into CR0, so no RC bit is passed here.
+ __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ }
+ break;
+ case kPPC_AndComplement32:
+ case kPPC_AndComplement64:
+ __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_Or32:
+ case kPPC_Or64:
+ if (HasRegisterInput(instr, 1)) {
+ __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_OrComplement32:
+ case kPPC_OrComplement64:
+ __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_Xor32:
+ case kPPC_Xor64:
+ if (HasRegisterInput(instr, 1)) {
+ __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_ShiftLeft32:
+ ASSEMBLE_BINOP_RC(slw, slwi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftLeft64:
+ ASSEMBLE_BINOP_RC(sld, sldi);
+ break;
+#endif
+ case kPPC_ShiftRight32:
+ ASSEMBLE_BINOP_RC(srw, srwi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftRight64:
+ ASSEMBLE_BINOP_RC(srd, srdi);
+ break;
+#endif
+ case kPPC_ShiftRightAlg32:
+ ASSEMBLE_BINOP_INT_RC(sraw, srawi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftRightAlg64:
+ ASSEMBLE_BINOP_INT_RC(srad, sradi);
+ break;
+#endif
+ case kPPC_RotRight32:
+ // PPC has rotate-left only; rotate right by n == rotate left by 32-n.
+ if (HasRegisterInput(instr, 1)) {
+ __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
+ __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+ i.OutputRCBit());
+ } else {
+ int sh = i.InputInt32(1);
+ __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+ }
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_RotRight64:
+ if (HasRegisterInput(instr, 1)) {
+ __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
+ __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+ i.OutputRCBit());
+ } else {
+ int sh = i.InputInt32(1);
+ __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+ }
+ break;
+#endif
+ case kPPC_Not32:
+ case kPPC_Not64:
+ __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndMask32:
+ // Inputs 2/3 encode mask bit positions; converted to rlwinm's
+ // big-endian bit numbering via 31 - n.
+ __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_RotLeftAndClear64:
+ __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndClearLeft64:
+ __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndClearRight64:
+ __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Add32:
+ case kPPC_Add64:
+ if (HasRegisterInput(instr, 1)) {
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_AddWithOverflow32:
+ ASSEMBLE_ADD_WITH_OVERFLOW();
+ break;
+ case kPPC_AddFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fadd);
+ break;
+ case kPPC_Sub32:
+ case kPPC_Sub64:
+ if (HasRegisterInput(instr, 1)) {
+ __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_SubWithOverflow32:
+ ASSEMBLE_SUB_WITH_OVERFLOW();
+ break;
+ case kPPC_SubFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fsub);
+ break;
+ case kPPC_Mul32:
+ __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Mul64:
+ __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_MulHigh32:
+ __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_MulHighU32:
+ __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_MulFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fmul);
+ break;
+ case kPPC_Div32:
+ __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Div64:
+ __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_DivU32:
+ __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_DivU64:
+ __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_DivFloat64:
+ ASSEMBLE_FLOAT_BINOP_RC(fdiv);
+ break;
+ case kPPC_Mod32:
+ ASSEMBLE_MODULO(divw, mullw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Mod64:
+ ASSEMBLE_MODULO(divd, mulld);
+ break;
+#endif
+ case kPPC_ModU32:
+ ASSEMBLE_MODULO(divwu, mullw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ModU64:
+ ASSEMBLE_MODULO(divdu, mulld);
+ break;
+#endif
+ case kPPC_ModFloat64:
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ ASSEMBLE_FLOAT_MODULO();
+ break;
+ case kPPC_Neg32:
+ case kPPC_Neg64:
+ __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
+ break;
+ case kPPC_SqrtFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
+ break;
+ case kPPC_FloorFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(frim);
+ break;
+ case kPPC_CeilFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(frip);
+ break;
+ case kPPC_TruncateFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(friz);
+ break;
+ case kPPC_RoundFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(frin);
+ break;
+ case kPPC_NegFloat64:
+ ASSEMBLE_FLOAT_UNOP_RC(fneg);
+ break;
+ case kPPC_Cmp32:
+ ASSEMBLE_COMPARE(cmpw, cmplw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Cmp64:
+ ASSEMBLE_COMPARE(cmp, cmpl);
+ break;
+#endif
+ case kPPC_CmpFloat64:
+ ASSEMBLE_FLOAT_COMPARE(fcmpu);
+ break;
+ case kPPC_Tst32:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+ } else {
+ __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+#if V8_TARGET_ARCH_PPC64
+ // On 64-bit, sign-extend so CR0 reflects the 32-bit test result —
+ // presumably needed because the AND was done on 64-bit registers;
+ // TODO confirm.
+ __ extsw(r0, r0, i.OutputRCBit());
+#endif
+ DCHECK_EQ(SetRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Tst64:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+ } else {
+ __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+ DCHECK_EQ(SetRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Push:
+ __ Push(i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_ExtendSignWord8:
+ __ extsb(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_ExtendSignWord16:
+ __ extsh(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ExtendSignWord32:
+ __ extsw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint32ToUint64:
+ // Zero extend
+ __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToInt32:
+ // TODO(mbrandy): sign extend?
+ __ Move(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Int32ToFloat64:
+ __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint32ToFloat64:
+ __ ConvertUnsignedIntToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64ToInt32:
+ case kPPC_Float64ToUint32:
+ // Both conversions share the 64-bit conversion helper; the 32-bit
+ // build additionally needs a scratch register.
+ __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_PPC64
+ kScratchReg,
+#endif
+ i.OutputRegister(), kScratchDoubleReg);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64ToFloat32:
+ ASSEMBLE_FLOAT_UNOP_RC(frsp);
+ break;
+ case kPPC_Float32ToFloat64:
+ // Nothing to do.
+ __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_LoadWordU8:
+ ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kPPC_LoadWordS8:
+ // No signed byte load on PPC: zero-extending load then sign-extend.
+ ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kPPC_LoadWordU16:
+ ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kPPC_LoadWordS16:
+ ASSEMBLE_LOAD_INTEGER(lha, lhax);
+ break;
+ case kPPC_LoadWordS32:
+ ASSEMBLE_LOAD_INTEGER(lwa, lwax);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_LoadWord64:
+ ASSEMBLE_LOAD_INTEGER(ld, ldx);
+ break;
+#endif
+ case kPPC_LoadFloat32:
+ ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
+ break;
+ case kPPC_LoadFloat64:
+ ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
+ break;
+ case kPPC_StoreWord8:
+ ASSEMBLE_STORE_INTEGER(stb, stbx);
+ break;
+ case kPPC_StoreWord16:
+ ASSEMBLE_STORE_INTEGER(sth, sthx);
+ break;
+ case kPPC_StoreWord32:
+ ASSEMBLE_STORE_INTEGER(stw, stwx);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_StoreWord64:
+ ASSEMBLE_STORE_INTEGER(std, stdx);
+ break;
+#endif
+ case kPPC_StoreFloat32:
+ ASSEMBLE_STORE_FLOAT(stfs, stfsx);
+ break;
+ case kPPC_StoreFloat64:
+ ASSEMBLE_STORE_FLOAT(stfd, stfdx);
+ break;
+ case kPPC_StoreWriteBarrier:
+ ASSEMBLE_STORE_WRITE_BARRIER();
+ break;
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(stfs, stfsx);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(stfd, stfdx);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+// Assembles branches after an instruction. Reads CR0 (set by the preceding
+// compare/test) and branches to the true/false labels of |branch|. For
+// floating-point compares, the unordered (NaN) outcome is routed explicitly
+// before the ordered branch, since le/gt would otherwise mis-handle NaNs.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ PPCOperandConverter i(this, instr);
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ ArchOpcode op = instr->arch_opcode();
+ FlagsCondition condition = branch->condition;
+ CRegister cr = cr0;
+
+ // Overflow checked for add/sub only.
+ DCHECK((condition != kOverflow && condition != kNotOverflow) ||
+ (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
+
+ Condition cond = FlagsConditionToCondition(condition);
+ if (op == kPPC_CmpFloat64) {
+ // check for unordered if necessary
+ if (cond == le) {
+ __ bunordered(flabel, cr);
+ // Unnecessary for eq/lt since only FU bit will be set.
+ } else if (cond == gt) {
+ __ bunordered(tlabel, cr);
+ // Unnecessary for ne/ge since only FU bit will be set.
+ }
+ }
+ __ b(cond, tlabel, cr);
+ if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
+}
+
+
+// Emits an unconditional branch to |target|, elided when the target block
+// immediately follows in assembly order.
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction: turns the CR0
+// state left by the preceding compare into a 0/1 value in the result
+// register, using isel (conditional select) to avoid branches. For float
+// compares (check_unordered), NaN outcomes are handled via bunordered so
+// that the pre-loaded register value becomes the answer for unordered.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ PPCOperandConverter i(this, instr);
+ Label done;
+ ArchOpcode op = instr->arch_opcode();
+ bool check_unordered = (op == kPPC_CmpFloat64);
+ CRegister cr = cr0;
+
+ // Overflow checked for add/sub only.
+ DCHECK((condition != kOverflow && condition != kNotOverflow) ||
+ (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+
+ Condition cond = FlagsConditionToCondition(condition);
+ switch (cond) {
+ case eq:
+ case lt:
+ // Start from 0, select 1 when the condition holds.
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ isel(cond, reg, kScratchReg, reg, cr);
+ break;
+ case ne:
+ case ge:
+ // Start from 1, select 0 (r0) when the negated condition holds.
+ __ li(reg, Operand(1));
+ __ isel(NegateCondition(cond), reg, r0, reg, cr);
+ break;
+ case gt:
+ if (check_unordered) {
+ // Unordered means "not greater": keep the pre-loaded 1? No — reg
+ // is 1 and scratch 0 here; on unordered we branch to done with reg
+ // still holding 1 — NOTE(review): verify intended unordered result.
+ __ li(reg, Operand(1));
+ __ li(kScratchReg, Operand::Zero());
+ __ bunordered(&done, cr);
+ __ isel(cond, reg, reg, kScratchReg, cr);
+ } else {
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ isel(cond, reg, kScratchReg, reg, cr);
+ }
+ break;
+ case le:
+ if (check_unordered) {
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ bunordered(&done, cr);
+ __ isel(NegateCondition(cond), reg, r0, kScratchReg, cr);
+ } else {
+ __ li(reg, Operand(1));
+ __ isel(NegateCondition(cond), reg, r0, reg, cr);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ bind(&done);
+}
+
+
+// Emits a call into the lazy deoptimization entry for |deoptimization_id|.
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// Emits the function prologue. Three frame shapes are handled:
+// - kCallAddress (C entry): build a manual frame, optionally with the
+// out-of-line constant pool register, and save callee-saved registers;
+// - JS function calls: standard JS prologue (possibly pre-aged);
+// - everything else: stub prologue.
+// Finally allocates the spill-slot area (adjusted for OSR entry).
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ __ function_descriptor();
+#endif
+ int register_save_area_size = 0;
+ RegList frame_saves = fp.bit();
+ __ mflr(r0);
+#if V8_OOL_CONSTANT_POOL
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ register_save_area_size += kPointerSize;
+ frame_saves |= kConstantPoolRegister.bit();
+#else
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+#endif
+ // Save callee-saved registers.
+ const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ __ MultiPush(saves);
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = this->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
+ if (stack_slots > 0) {
+ __ Add(sp, sp, -stack_slots * kPointerSize, r0);
+ }
+}
+
+
+// Emits the function epilogue: pops spill slots and callee-saved registers
+// for C-entry frames, then tears down the frame and returns. For JS calls,
+// also pops the receiver+arguments (JSParameterCount).
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Add(sp, sp, stack_slots * kPointerSize, r0);
+ }
+ // Restore registers.
+ RegList frame_saves = fp.bit();
+#if V8_OOL_CONSTANT_POOL
+ frame_saves |= kConstantPoolRegister.bit();
+#endif
+ const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+ }
+ __ LeaveFrame(StackFrame::MANUAL);
+ __ Ret();
+ } else {
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
+ __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+ __ Ret();
+ }
+}
+
+
+// Emits a parallel-move element: moves |source| into |destination|.
+// Handles register/stack-slot/constant sources for both general and double
+// operands; slot-to-slot moves go through a scratch register.
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ PPCOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Move(g.ToRegister(destination), src);
+ } else {
+ __ StoreP(src, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ LoadP(g.ToRegister(destination), src, r0);
+ } else {
+ Register temp = kScratchReg;
+ __ LoadP(temp, src, r0);
+ __ StoreP(temp, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ // Materialize into the destination register, or into the scratch
+ // register when the destination is a stack slot (stored below).
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ mov(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kInt64:
+ __ mov(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
+ case Constant::kFloat64:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ mov(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject:
+ __ Move(dst, src.ToHeapObject());
+ break;
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
+ break;
+ }
+ if (destination->IsStackSlot()) {
+ __ StoreP(dst, g.ToMemOperand(destination), r0);
+ }
+ } else {
+ DoubleRegister dst = destination->IsDoubleRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+ : src.ToFloat64();
+ __ LoadDoubleLiteral(dst, value, kScratchReg);
+ if (destination->IsDoubleStackSlot()) {
+ __ StoreDouble(dst, g.ToMemOperand(destination), r0);
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ StoreDouble(src, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+ } else {
+ DoubleRegister temp = kScratchDoubleReg;
+ __ LoadDouble(temp, src, r0);
+ __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// Emits a parallel-move swap: exchanges the contents of |source| and
+// |destination| using the scratch registers. On PPC64 a general stack slot
+// and a double stack slot are the same width, so they share the
+// slot-to-slot path.
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ PPCOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mr(temp, src);
+ __ mr(src, dst);
+ __ mr(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mr(temp, src);
+ __ LoadP(src, dst);
+ __ StoreP(temp, dst);
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+#else
+ } else if (source->IsStackSlot()) {
+#endif
+ DCHECK(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ Register temp_1 = r0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ LoadP(temp_0, src);
+ __ LoadP(temp_1, dst);
+ __ StoreP(temp_0, dst);
+ __ StoreP(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister temp = kScratchDoubleReg;
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ fmr(temp, src);
+ __ fmr(src, dst);
+ __ fmr(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ fmr(temp, src);
+ __ lfd(src, dst);
+ __ stfd(temp, dst);
+ }
+#if !V8_TARGET_ARCH_PPC64
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ DoubleRegister temp_0 = kScratchDoubleReg;
+ DoubleRegister temp_1 = d0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ lfd(temp_0, src);
+ __ lfd(temp_1, dst);
+ __ stfd(temp_0, dst);
+ __ stfd(temp_1, src);
+#endif
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+// Intentionally a no-op on PPC.
+void CodeGenerator::AddNopForSmiCodeInlining() {
+ // We do not insert nops for inlined Smi code.
+}
+
+
+// Pads the instruction stream with nops so that the deoptimizer always has
+// Deoptimizer::patch_size() bytes available after the previous lazy-bailout
+// site for call patching. Stubs are never lazily deoptimized and are
+// skipped.
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
new file mode 100644
index 0000000000..715a904fef
--- /dev/null
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+#define V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// PPC-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(PPC_And32) \
+ V(PPC_And64) \
+ V(PPC_AndComplement32) \
+ V(PPC_AndComplement64) \
+ V(PPC_Or32) \
+ V(PPC_Or64) \
+ V(PPC_OrComplement32) \
+ V(PPC_OrComplement64) \
+ V(PPC_Xor32) \
+ V(PPC_Xor64) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not32) \
+ V(PPC_Not64) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add32) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_Add64) \
+ V(PPC_AddFloat64) \
+ V(PPC_Sub32) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_Sub64) \
+ V(PPC_SubFloat64) \
+ V(PPC_Mul32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulFloat64) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivFloat64) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModFloat64) \
+ V(PPC_Neg32) \
+ V(PPC_Neg64) \
+ V(PPC_NegFloat64) \
+ V(PPC_SqrtFloat64) \
+ V(PPC_FloorFloat64) \
+ V(PPC_CeilFloat64) \
+ V(PPC_TruncateFloat64) \
+ V(PPC_RoundFloat64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpFloat64) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int32ToFloat64) \
+ V(PPC_Uint32ToFloat64) \
+ V(PPC_Float32ToFloat64) \
+ V(PPC_Float64ToInt32) \
+ V(PPC_Float64ToUint32) \
+ V(PPC_Float64ToFloat32) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadFloat64) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreFloat64) \
+ V(PPC_StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
new file mode 100644
index 0000000000..6d39df6538
--- /dev/null
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -0,0 +1,1383 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+ kInt16Imm,
+ kInt16Imm_Unsigned,
+ kInt16Imm_Negate,
+ kInt16Imm_4ByteAligned,
+ kShift32Imm,
+ kShift64Imm,
+ kNoImmediate
+};
+
+
+// Adds PPC-specific methods for generating operands.
+class PPCOperandGenerator FINAL : public OperandGenerator {
+ public:
+ explicit PPCOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+ if (CanBeImmediate(node, mode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ int64_t value;
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
+ return CanBeImmediate(value, mode);
+ }
+
+ bool CanBeImmediate(int64_t value, ImmediateMode mode) {
+ switch (mode) {
+ case kInt16Imm:
+ return is_int16(value);
+ case kInt16Imm_Unsigned:
+ return is_uint16(value);
+ case kInt16Imm_Negate:
+ return is_int16(-value);
+ case kInt16Imm_4ByteAligned:
+ return is_int16(value) && !(value & 3);
+ case kShift32Imm:
+ return 0 <= value && value < 32;
+ case kShift64Imm:
+ return 0 <= value && value < 64;
+ case kNoImmediate:
+ return false;
+ }
+ return false;
+ }
+};
+
+
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, ImmediateMode operand_mode) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, ImmediateMode operand_mode,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Matcher m(node);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, ImmediateMode operand_mode) {
+ FlagsContinuation cont;
+ VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+
+ ArchOpcode opcode;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kPPC_LoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kPPC_LoadFloat64;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = (typ == kTypeInt32) ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
+ break;
+ case kRepWord16:
+ opcode = (typ == kTypeInt32) ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
+ break;
+#if !V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+#endif
+ case kRepWord32:
+ opcode = kPPC_LoadWordS32;
+#if V8_TARGET_ARCH_PPC64
+ // TODO(mbrandy): this applies to signed loads only (lwa)
+ mode = kInt16Imm_4ByteAligned;
+#endif
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kPPC_LoadWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#endif
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand temps[] = {g.TempRegister(r8), g.TempRegister(r9)};
+ Emit(kPPC_StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r7),
+ g.UseFixed(offset, r8), g.UseFixed(value, r9), arraysize(temps),
+ temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ ArchOpcode opcode;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kPPC_StoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kPPC_StoreFloat64;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kPPC_StoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kPPC_StoreWord16;
+ break;
+#if !V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+#endif
+ case kRepWord32:
+ opcode = kPPC_StoreWord32;
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kPPC_StoreWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#endif
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ }
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ PPCOperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ PPCOperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
+}
+
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+ ArchOpcode opcode, bool left_can_cover,
+ bool right_can_cover, ImmediateMode imm_mode) {
+ PPCOperandGenerator g(selector);
+
+ // Map instruction to equivalent operation with inverted right input.
+ ArchOpcode inv_opcode = opcode;
+ switch (opcode) {
+ case kPPC_And32:
+ inv_opcode = kPPC_AndComplement32;
+ break;
+ case kPPC_And64:
+ inv_opcode = kPPC_AndComplement64;
+ break;
+ case kPPC_Or32:
+ inv_opcode = kPPC_OrComplement32;
+ break;
+ case kPPC_Or64:
+ inv_opcode = kPPC_OrComplement64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+ if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+ Matcher mleft(m->left().node());
+ if (mleft.right().Is(-1)) {
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->right().node()),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+
+ // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+ if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+ right_can_cover) {
+ Matcher mright(m->right().node());
+ if (mright.right().Is(-1)) {
+ // TODO(all): support shifted operand on right.
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->left().node()),
+ g.UseRegister(mright.left().node()));
+ return;
+ }
+ }
+
+ VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+}
+
+
+static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation32(value);
+ int mask_msb = base::bits::CountLeadingZeros32(value);
+ int mask_lsb = base::bits::CountTrailingZeros32(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation64(value);
+ int mask_msb = base::bits::CountLeadingZeros64(value);
+ int mask_lsb = base::bits::CountTrailingZeros64(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+#endif
+
+
+// TODO(mbrandy): Absorb rotate-right into rlwinm?
+void InstructionSelector::VisitWord32And(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ int mb;
+ int me;
+ if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
+ CanCover(node, left)) {
+ // Try to absorb left/right shift into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 31)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord32Shr()) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
+ g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kPPC_And32, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb rotate-right into rldic?
+void InstructionSelector::VisitWord64And(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ int mb;
+ int me;
+ if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
+ CanCover(node, left)) {
+ // Try to absorb left/right shift into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 63)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord64Shr()) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh && m.left().IsWord64Shl()) {
+ match = true;
+ opcode = kPPC_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+ g.TempImmediate(sh), g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kPPC_And64, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ Int32BinopMatcher m(node);
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kPPC_Or32, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Or(Node* node) {
+ Int64BinopMatcher m(node);
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kPPC_Or64, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kPPC_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor32, kInt16Imm_Unsigned);
+ }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kPPC_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor64, kInt16Imm_Unsigned);
+ }
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ // Try to absorb logical-and into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftLeft32, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // TODO(mbrandy): eliminate left sign extension if right >= 32
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ // Try to absorb logical-and into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh) {
+ match = true;
+ opcode = kPPC_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftLeft64, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ // Try to absorb logical-and into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftRight32, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ // Try to absorb logical-and into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftRight64, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ // Replace with sign extension for (x << K) >> K where K is 16 or 24.
+ if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(16) && m.right().Is(16)) {
+ Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if (mleft.right().Is(24) && m.right().Is(24)) {
+ Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+ VisitRRO(this, node, kPPC_ShiftRightAlg32, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitRRO(this, node, kPPC_ShiftRightAlg64, kShift64Imm);
+}
+#endif
+
+
+// TODO(mbrandy): Absorb logical-and into rlwinm?
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, node, kPPC_RotRight32, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb logical-and into rldic?
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, node, kPPC_RotRight64, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kPPC_Neg32, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub32, kInt16Imm_Negate);
+ }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kPPC_Neg64, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub64, kInt16Imm_Negate);
+ }
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ VisitRRR(this, node, kPPC_Mul32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ VisitRRR(this, node, kPPC_Mul64);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitRRR(this, node, kPPC_Div32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Div(Node* node) {
+ VisitRRR(this, node, kPPC_Div64);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ VisitRRR(this, node, kPPC_DivU32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Div(Node* node) {
+ VisitRRR(this, node, kPPC_DivU64);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitRRR(this, node, kPPC_Mod32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ VisitRRR(this, node, kPPC_Mod64);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ VisitRRR(this, node, kPPC_ModU32);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ VisitRRR(this, node, kPPC_ModU64);
+}
+#endif
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float32ToFloat64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Int32ToFloat64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Uint32ToFloat64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ToUint32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ PPCOperandGenerator g(this);
+ Emit(kPPC_ExtendSignWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Uint32ToUint64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ToFloat32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ PPCOperandGenerator g(this);
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ Emit(kPPC_Int64ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ // TODO(mbrandy): detect multiply-add
+ VisitRRRFloat64(this, node, kPPC_AddFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ // TODO(mbrandy): detect multiply-subtract
+ VisitRRRFloat64(this, node, kPPC_SubFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ // TODO(mbrandy): detect negate
+ VisitRRRFloat64(this, node, kPPC_MulFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRRFloat64(this, node, kPPC_DivFloat64);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_ModFloat64, g.DefineAsFixed(node, d1),
+ g.UseFixed(node->InputAt(0), d1),
+ g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRRFloat64(this, kPPC_SqrtFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ VisitRRFloat64(this, kPPC_FloorFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ VisitRRFloat64(this, kPPC_CeilFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRRFloat64(this, kPPC_TruncateFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ VisitRRFloat64(this, kPPC_RoundFloat64, node);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
+ kInt16Imm, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm,
+ &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+}
+
+
+static bool CompareLogical(FlagsContinuation* cont) {
+ switch (cont->condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative, ImmediateMode immediate_mode) {
+ PPCOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, immediate_mode)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, immediate_mode)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+
+static void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+static void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
+}
+#endif
+
+
+// Shared routine for multiple float compare operations.
+static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kPPC_CmpFloat64, g.UseRegister(left),
+ g.UseRegister(right), cont);
+}
+
+
+// Shared routine for word comparisons against zero.
+static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, InstructionCode opcode,
+ FlagsContinuation* cont) {
+ while (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kWord64Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+#endif
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation> is either NULL, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch).
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == NULL || selector->IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(
+ selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(selector, node,
+ kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kWord32And:
+ // TODO(mbandy): opportunity for rlwinm?
+ return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt32Add:
+// case IrOpcode::kWord32Or:
+// case IrOpcode::kWord32Xor:
+// case IrOpcode::kWord32Sar:
+// case IrOpcode::kWord32Shl:
+// case IrOpcode::kWord32Shr:
+// case IrOpcode::kWord32Ror:
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kWord64And:
+ // TODO(mbandy): opportunity for rldic?
+ return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt64Add:
+// case IrOpcode::kWord64Or:
+// case IrOpcode::kWord64Xor:
+// case IrOpcode::kWord64Sar:
+// case IrOpcode::kWord64Shl:
+// case IrOpcode::kWord64Shr:
+// case IrOpcode::kWord64Ror:
+#endif
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ PPCOperandGenerator g(selector);
+ VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+ cont);
+}
+
+
+static void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+static void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
+}
+#endif
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+#endif
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
+ PPCOperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on PPC it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ // Push any stack arguments.
+ // TODO(mbrandy): reverse order and use push only for first
+ for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
+ i++) {
+ Emit(kPPC_Push, g.NoOutput(), g.UseRegister(*i));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ InstructionOperand* first_output =
+ buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
+ Instruction* call_instr =
+ Emit(opcode, buffer.outputs.size(), first_output,
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
+ call_instr->MarkAsCall();
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesAway;
+ // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ppc/linkage-ppc.cc b/deps/v8/src/compiler/ppc/linkage-ppc.cc
new file mode 100644
index 0000000000..38117222a9
--- /dev/null
+++ b/deps/v8/src/compiler/ppc/linkage-ppc.cc
@@ -0,0 +1,69 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct PPCLinkageHelperTraits {
+ static Register ReturnValueReg() { return r3; }
+ static Register ReturnValue2Reg() { return r4; }
+ static Register JSCallFunctionReg() { return r4; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return r4; }
+ static Register RuntimeCallArgCountReg() { return r3; }
+ static RegList CCalleeSaveRegisters() {
+ return r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() |
+ r19.bit() | r20.bit() | r21.bit() | r22.bit() | r23.bit() |
+ r24.bit() | r25.bit() | r26.bit() | r27.bit() | r28.bit() |
+ r29.bit() | r30.bit() | fp.bit();
+ }
+ static Register CRegisterParameter(int i) {
+ static Register register_parameters[] = {r3, r4, r5, r6, r7, r8, r9, r10};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 8; }
+};
+
+
+typedef LinkageHelper<PPCLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
+ CallDescriptor::Flags flags) {
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ const MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index b93ec66338..489c2ca6d9 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -11,11 +11,11 @@ namespace v8 {
namespace internal {
namespace compiler {
-RawMachineAssembler::RawMachineAssembler(Graph* graph,
- MachineSignature* machine_sig,
+RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
+ const MachineSignature* machine_sig,
MachineType word,
MachineOperatorBuilder::Flags flags)
- : GraphBuilder(graph),
+ : GraphBuilder(isolate, graph),
schedule_(new (zone()) Schedule(zone())),
machine_(zone(), word, flags),
common_(zone()),
@@ -76,6 +76,30 @@ void RawMachineAssembler::Branch(Node* condition, Label* true_val,
}
+void RawMachineAssembler::Switch(Node* index, Label* default_label,
+ int32_t* case_values, Label** case_labels,
+ size_t case_count) {
+ DCHECK_NE(schedule()->end(), current_block_);
+ size_t succ_count = case_count + 1;
+ Node* switch_node = NewNode(common()->Switch(succ_count), index);
+ BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t case_value = case_values[index];
+ BasicBlock* case_block = Use(case_labels[index]);
+ Node* case_node =
+ graph()->NewNode(common()->IfValue(case_value), switch_node);
+ schedule()->AddNode(case_block, case_node);
+ succ_blocks[index] = case_block;
+ }
+ BasicBlock* default_block = Use(default_label);
+ Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
+ schedule()->AddNode(default_block, default_node);
+ succ_blocks[case_count] = default_block;
+ schedule()->AddSwitch(CurrentBlock(), switch_node, succ_blocks, succ_count);
+ current_block_ = nullptr;
+}
+
+
void RawMachineAssembler::Return(Node* value) {
schedule()->AddReturn(CurrentBlock(), value);
current_block_ = NULL;
@@ -87,8 +111,8 @@ Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
CallFunctionFlags flags) {
Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- callable.descriptor(), 1, CallDescriptor::kNeedsFrameState,
- Operator::kNoProperties, zone());
+ isolate(), zone(), callable.descriptor(), 1,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
Node* stub_code = HeapConstant(callable.code());
Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
receiver, context, frame_state);
@@ -99,8 +123,8 @@ Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
Node* context, Node* frame_state) {
- CallDescriptor* descriptor =
- Linkage::GetJSCallDescriptor(1, zone(), CallDescriptor::kNeedsFrameState);
+ CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
+ zone(), false, 1, CallDescriptor::kNeedsFrameState);
Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver,
context, frame_state);
schedule()->AddNode(CurrentBlock(), call);
@@ -112,7 +136,7 @@ Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
Node* arg0, Node* context,
Node* frame_state) {
CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- function, 1, Operator::kNoProperties, zone());
+ zone(), function, 1, Operator::kNoProperties);
Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
Node* ref = NewNode(
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 5455814fa2..04b1dc6f57 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
-#include "src/v8.h"
-
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-builder.h"
#include "src/compiler/linkage.h"
@@ -44,19 +42,19 @@ class RawMachineAssembler : public GraphBuilder {
DISALLOW_COPY_AND_ASSIGN(Label);
};
- RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
+ RawMachineAssembler(Isolate* isolate, Graph* graph,
+ const MachineSignature* machine_sig,
MachineType word = kMachPtr,
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::Flag::kNoFlags);
~RawMachineAssembler() OVERRIDE {}
- Isolate* isolate() const { return zone()->isolate(); }
Zone* zone() const { return graph()->zone(); }
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
size_t parameter_count() const { return machine_sig_->parameter_count(); }
- MachineSignature* machine_sig() const { return machine_sig_; }
+ const MachineSignature* machine_sig() const { return machine_sig_; }
Node* UndefinedConstant() {
Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(
@@ -402,6 +400,8 @@ class RawMachineAssembler : public GraphBuilder {
Label* Exit();
void Goto(Label* label);
void Branch(Node* condition, Label* true_val, Label* false_val);
+ void Switch(Node* index, Label* default_label, int32_t* case_values,
+ Label** case_labels, size_t case_count);
// Call through CallFunctionStub with lazy deopt and frame-state.
Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
Node* frame_state, CallFunctionFlags flags);
@@ -448,7 +448,7 @@ class RawMachineAssembler : public GraphBuilder {
Schedule* schedule_;
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
- MachineSignature* machine_sig_;
+ const MachineSignature* machine_sig_;
CallDescriptor* call_descriptor_;
Node** parameters_;
Label exit_label_;
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index dabfd59ef6..434e965bf3 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/bit-vector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/register-allocator-verifier.h"
@@ -19,7 +20,7 @@ static void VerifyGapEmpty(const GapInstruction* gap) {
i <= GapInstruction::LAST_INNER_POSITION; i++) {
GapInstruction::InnerPosition inner_pos =
static_cast<GapInstruction::InnerPosition>(i);
- CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
+ CHECK(!gap->GetParallelMove(inner_pos));
}
}
@@ -28,7 +29,7 @@ void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
if (constraint.type_ != kImmediate) {
- CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
}
@@ -39,15 +40,13 @@ void RegisterAllocatorVerifier::VerifyTemp(
CHECK_NE(kSameAsFirst, constraint.type_);
CHECK_NE(kImmediate, constraint.type_);
CHECK_NE(kConstant, constraint.type_);
- CHECK_EQ(UnallocatedOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
}
void RegisterAllocatorVerifier::VerifyOutput(
const OperandConstraint& constraint) {
CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -62,8 +61,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
// kSameAsFirst along the way.
for (const auto* instr : sequence->instructions()) {
const size_t operand_count = OperandCount(instr);
- auto* op_constraints =
- zone->NewArray<OperandConstraint>(static_cast<int>(operand_count));
+ auto* op_constraints = zone->NewArray<OperandConstraint>(operand_count);
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
BuildConstraint(instr->InputAt(i), &op_constraints[count]);
@@ -121,7 +119,7 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
OperandConstraint* constraint) {
constraint->value_ = kMinInt;
- constraint->virtual_register_ = UnallocatedOperand::kInvalidVirtualRegister;
+ constraint->virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
if (op->IsConstant()) {
constraint->type_ = kConstant;
constraint->value_ = ConstantOperand::cast(op)->index();
@@ -214,89 +212,121 @@ void RegisterAllocatorVerifier::CheckConstraint(
}
}
+namespace {
+
+typedef BasicBlock::RpoNumber Rpo;
-class RegisterAllocatorVerifier::OutgoingMapping : public ZoneObject {
+static const int kInvalidVreg = InstructionOperand::kInvalidVirtualRegister;
+
+struct PhiData : public ZoneObject {
+ PhiData(Rpo definition_rpo, const PhiInstruction* phi, int first_pred_vreg,
+ const PhiData* first_pred_phi, Zone* zone)
+ : definition_rpo(definition_rpo),
+ virtual_register(phi->virtual_register()),
+ first_pred_vreg(first_pred_vreg),
+ first_pred_phi(first_pred_phi),
+ operands(zone) {
+ operands.reserve(phi->operands().size());
+ operands.insert(operands.begin(), phi->operands().begin(),
+ phi->operands().end());
+ }
+ const Rpo definition_rpo;
+ const int virtual_register;
+ const int first_pred_vreg;
+ const PhiData* first_pred_phi;
+ IntVector operands;
+};
+
+class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
public:
- struct OperandLess {
- bool operator()(const InstructionOperand* a,
- const InstructionOperand* b) const {
- if (a->kind() == b->kind()) return a->index() < b->index();
- return a->kind() < b->kind();
- }
+ explicit PhiMap(Zone* zone) : ZoneMap<int, PhiData*>(zone) {}
+};
+
+struct OperandLess {
+ bool operator()(const InstructionOperand* a,
+ const InstructionOperand* b) const {
+ if (a->kind() == b->kind()) return a->index() < b->index();
+ return a->kind() < b->kind();
+ }
+};
+
+class OperandMap : public ZoneObject {
+ public:
+ struct MapValue : public ZoneObject {
+ MapValue()
+ : incoming(nullptr),
+ define_vreg(kInvalidVreg),
+ use_vreg(kInvalidVreg),
+ succ_vreg(kInvalidVreg) {}
+ MapValue* incoming; // value from first predecessor block.
+ int define_vreg; // valid if this value was defined in this block.
+ int use_vreg; // valid if this value was used in this block.
+ int succ_vreg; // valid if propagated back from successor block.
};
- typedef std::map<
- const InstructionOperand*, int, OperandLess,
- zone_allocator<std::pair<const InstructionOperand*, const int>>>
- LocationMap;
-
- explicit OutgoingMapping(Zone* zone)
- : locations_(LocationMap::key_compare(),
- LocationMap::allocator_type(zone)),
- predecessor_intersection_(LocationMap::key_compare(),
- LocationMap::allocator_type(zone)) {}
-
- LocationMap* locations() { return &locations_; }
-
- void RunPhis(const InstructionSequence* sequence,
- const InstructionBlock* block, size_t phi_index) {
- // This operation is only valid in edge split form.
- size_t predecessor_index = block->predecessors()[phi_index].ToSize();
- CHECK(sequence->instruction_blocks()[predecessor_index]->SuccessorCount() ==
- 1);
- for (const auto* phi : block->phis()) {
- auto input = phi->inputs()[phi_index];
- CHECK(locations()->find(input) != locations()->end());
- auto it = locations()->find(phi->output());
- CHECK(it != locations()->end());
- if (input->IsConstant()) {
- CHECK_EQ(it->second, input->index());
- } else {
- CHECK_EQ(it->second, phi->operands()[phi_index]);
+ class Map
+ : public ZoneMap<const InstructionOperand*, MapValue*, OperandLess> {
+ public:
+ explicit Map(Zone* zone)
+ : ZoneMap<const InstructionOperand*, MapValue*, OperandLess>(zone) {}
+
+ // Remove all entries with keys not in other.
+ void Intersect(const Map& other) {
+ if (this->empty()) return;
+ auto it = this->begin();
+ OperandLess less;
+ for (const auto& o : other) {
+ while (less(it->first, o.first)) {
+ this->erase(it++);
+ if (it == this->end()) return;
+ }
+ if (it->first->Equals(o.first)) {
+ ++it;
+ if (it == this->end()) return;
+ } else {
+ CHECK(less(o.first, it->first));
+ }
}
- it->second = phi->virtual_register();
}
- }
+ };
- void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
- for (int i = GapInstruction::FIRST_INNER_POSITION;
- i <= GapInstruction::LAST_INNER_POSITION; i++) {
- GapInstruction::InnerPosition inner_pos =
- static_cast<GapInstruction::InnerPosition>(i);
- const ParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move == nullptr) continue;
- RunParallelMoves(zone, move);
- }
- }
+ explicit OperandMap(Zone* zone) : map_(zone) {}
+
+ Map& map() { return map_; }
void RunParallelMoves(Zone* zone, const ParallelMove* move) {
// Compute outgoing mappings.
- LocationMap to_insert((LocationMap::key_compare()),
- LocationMap::allocator_type(zone));
- auto* moves = move->move_operands();
+ Map to_insert(zone);
+ auto moves = move->move_operands();
for (auto i = moves->begin(); i != moves->end(); ++i) {
if (i->IsEliminated()) continue;
- auto cur = locations()->find(i->source());
- CHECK(cur != locations()->end());
+ auto cur = map().find(i->source());
+ CHECK(cur != map().end());
to_insert.insert(std::make_pair(i->destination(), cur->second));
}
// Drop current mappings.
for (auto i = moves->begin(); i != moves->end(); ++i) {
if (i->IsEliminated()) continue;
- auto cur = locations()->find(i->destination());
- if (cur != locations()->end()) locations()->erase(cur);
+ auto cur = map().find(i->destination());
+ if (cur != map().end()) map().erase(cur);
}
// Insert new values.
- locations()->insert(to_insert.begin(), to_insert.end());
+ map().insert(to_insert.begin(), to_insert.end());
}
- void Map(const InstructionOperand* op, int virtual_register) {
- locations()->insert(std::make_pair(op, virtual_register));
+ void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
+ for (int i = GapInstruction::FIRST_INNER_POSITION;
+ i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ auto inner_pos = static_cast<GapInstruction::InnerPosition>(i);
+ auto move = gap->GetParallelMove(inner_pos);
+ if (move == nullptr) continue;
+ RunParallelMoves(zone, move);
+ }
}
void Drop(const InstructionOperand* op) {
- auto it = locations()->find(op);
- if (it != locations()->end()) locations()->erase(it);
+ auto it = map().find(op);
+ if (it != map().end()) map().erase(it);
}
void DropRegisters(const RegisterConfiguration* config) {
@@ -310,131 +340,316 @@ class RegisterAllocatorVerifier::OutgoingMapping : public ZoneObject {
}
}
- void InitializeFromFirstPredecessor(const InstructionSequence* sequence,
- const OutgoingMappings* outgoing_mappings,
- const InstructionBlock* block) {
- if (block->predecessors().empty()) return;
- size_t predecessor_index = block->predecessors()[0].ToSize();
- CHECK(predecessor_index < block->rpo_number().ToSize());
- auto* incoming = outgoing_mappings->at(predecessor_index);
- if (block->PredecessorCount() > 1) {
- // Update incoming map with phis. The remaining phis will be checked later
- // as their mappings are not guaranteed to exist yet.
- incoming->RunPhis(sequence, block, 0);
+ void Define(Zone* zone, const InstructionOperand* op, int virtual_register) {
+ auto value = new (zone) MapValue();
+ value->define_vreg = virtual_register;
+ auto res = map().insert(std::make_pair(op, value));
+ if (!res.second) res.first->second = value;
+ }
+
+ void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
+ auto it = map().find(op);
+ CHECK(it != map().end());
+ auto v = it->second;
+ if (v->define_vreg != kInvalidVreg) {
+ CHECK_EQ(v->define_vreg, use_vreg);
}
- // Now initialize outgoing mapping for this block with incoming mapping.
- CHECK(locations_.empty());
- locations_ = incoming->locations_;
+ // Already used this vreg in this block.
+ if (v->use_vreg != kInvalidVreg) {
+ CHECK_EQ(v->use_vreg, use_vreg);
+ return;
+ }
+ if (!initial_pass) {
+ // A value may be defined and used in this block or the use must have
+ // propagated up.
+ if (v->succ_vreg != kInvalidVreg) {
+ CHECK_EQ(v->succ_vreg, use_vreg);
+ } else {
+ CHECK_EQ(v->define_vreg, use_vreg);
+ }
+ // Mark the use.
+ it->second->use_vreg = use_vreg;
+ return;
+ }
+ // Go up block list and ensure the correct definition is reached.
+ for (; v != nullptr; v = v->incoming) {
+ // Value unused in block.
+ if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
+ continue;
+ }
+ // Found correct definition or use.
+ CHECK(v->define_vreg == use_vreg || v->use_vreg == use_vreg);
+ // Mark the use.
+ it->second->use_vreg = use_vreg;
+ return;
+ }
+ // Use of a non-phi value without definition.
+ CHECK(false);
}
- void InitializeFromIntersection() { locations_ = predecessor_intersection_; }
+ void UsePhi(const InstructionOperand* op, const PhiData* phi,
+ bool initial_pass) {
+ auto it = map().find(op);
+ CHECK(it != map().end());
+ auto v = it->second;
+ int use_vreg = phi->virtual_register;
+ // Phis are not defined.
+ CHECK_EQ(kInvalidVreg, v->define_vreg);
+ // Already used this vreg in this block.
+ if (v->use_vreg != kInvalidVreg) {
+ CHECK_EQ(v->use_vreg, use_vreg);
+ return;
+ }
+ if (!initial_pass) {
+ // A used phi must have propagated its use to a predecessor.
+ CHECK_EQ(v->succ_vreg, use_vreg);
+ // Mark the use.
+ v->use_vreg = use_vreg;
+ return;
+ }
+ // Go up the block list starting at the first predecessor and ensure this
+ // phi has a correct use or definition.
+ for (v = v->incoming; v != nullptr; v = v->incoming) {
+ // Value unused in block.
+ if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
+ continue;
+ }
+ // Found correct definition or use.
+ if (v->define_vreg != kInvalidVreg) {
+ CHECK(v->define_vreg == phi->first_pred_vreg);
+ } else if (v->use_vreg != phi->first_pred_vreg) {
+ // Walk the phi chain, hunting for a matching phi use.
+ auto p = phi;
+ for (; p != nullptr; p = p->first_pred_phi) {
+ if (p->virtual_register == v->use_vreg) break;
+ }
+ CHECK(p);
+ }
+ // Mark the use.
+ it->second->use_vreg = use_vreg;
+ return;
+ }
+ // Use of a phi value without definition.
+ UNREACHABLE();
+ }
+
+ private:
+ Map map_;
+ DISALLOW_COPY_AND_ASSIGN(OperandMap);
+};
+
+} // namespace
+
+
+class RegisterAllocatorVerifier::BlockMaps {
+ public:
+ BlockMaps(Zone* zone, const InstructionSequence* sequence)
+ : zone_(zone),
+ sequence_(sequence),
+ phi_map_guard_(sequence->VirtualRegisterCount(), zone),
+ phi_map_(zone),
+ incoming_maps_(zone),
+ outgoing_maps_(zone) {
+ InitializePhis();
+ InitializeOperandMaps();
+ }
+
+ bool IsPhi(int virtual_register) {
+ return phi_map_guard_.Contains(virtual_register);
+ }
- void InitializeIntersection(const OutgoingMapping* incoming) {
- CHECK(predecessor_intersection_.empty());
- predecessor_intersection_ = incoming->locations_;
+ const PhiData* GetPhi(int virtual_register) {
+ auto it = phi_map_.find(virtual_register);
+ CHECK(it != phi_map_.end());
+ return it->second;
}
- void Intersect(const OutgoingMapping* other) {
- if (predecessor_intersection_.empty()) return;
- auto it = predecessor_intersection_.begin();
- OperandLess less;
- for (const auto& o : other->locations_) {
- while (less(it->first, o.first)) {
- ++it;
- if (it == predecessor_intersection_.end()) return;
+ OperandMap* InitializeIncoming(size_t block_index, bool initial_pass) {
+ return initial_pass ? InitializeFromFirstPredecessor(block_index)
+ : InitializeFromIntersection(block_index);
+ }
+
+ void PropagateUsesBackwards() {
+ typedef std::set<size_t, std::greater<size_t>, zone_allocator<size_t>>
+ BlockIds;
+ BlockIds block_ids((BlockIds::key_compare()),
+ zone_allocator<size_t>(zone()));
+ // First ensure that incoming contains only keys in all predecessors.
+ for (auto block : sequence()->instruction_blocks()) {
+ size_t index = block->rpo_number().ToSize();
+ block_ids.insert(index);
+ auto& succ_map = incoming_maps_[index]->map();
+ for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+ auto pred_rpo = block->predecessors()[i];
+ succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
}
- if (it->first->Equals(o.first)) {
- if (o.second != it->second) {
- predecessor_intersection_.erase(it++);
- } else {
- ++it;
+ }
+ // Back propagation fixpoint.
+ while (!block_ids.empty()) {
+ // Pop highest block_id.
+ auto block_id_it = block_ids.begin();
+ const size_t succ_index = *block_id_it;
+ block_ids.erase(block_id_it);
+ // Propagate uses back to their definition blocks using succ_vreg.
+ auto block = sequence()->instruction_blocks()[succ_index];
+ auto& succ_map = incoming_maps_[succ_index]->map();
+ for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+ for (auto& succ_val : succ_map) {
+ // An incoming map contains no defines.
+ CHECK_EQ(kInvalidVreg, succ_val.second->define_vreg);
+ // Compute succ_vreg.
+ int succ_vreg = succ_val.second->succ_vreg;
+ if (succ_vreg == kInvalidVreg) {
+ succ_vreg = succ_val.second->use_vreg;
+ // Initialize succ_vreg in back propagation chain.
+ succ_val.second->succ_vreg = succ_vreg;
+ }
+ if (succ_vreg == kInvalidVreg) continue;
+ // May need to transition phi.
+ if (IsPhi(succ_vreg)) {
+ auto phi = GetPhi(succ_vreg);
+ if (phi->definition_rpo.ToSize() == succ_index) {
+ // phi definition block, transition to pred value.
+ succ_vreg = phi->operands[i];
+ }
+ }
+ // Push succ_vreg up to all predecessors.
+ auto pred_rpo = block->predecessors()[i];
+ auto& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
+ auto& pred_val = *pred_map.find(succ_val.first);
+ if (pred_val.second->use_vreg != kInvalidVreg) {
+ CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
+ }
+ if (pred_val.second->define_vreg != kInvalidVreg) {
+ CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
+ }
+ if (pred_val.second->succ_vreg != kInvalidVreg) {
+ CHECK_EQ(succ_vreg, pred_val.second->succ_vreg);
+ } else {
+ pred_val.second->succ_vreg = succ_vreg;
+ block_ids.insert(pred_rpo.ToSize());
+ }
}
- if (it == predecessor_intersection_.end()) return;
+ }
+ }
+ // Clear uses and back links for second pass.
+ for (auto operand_map : incoming_maps_) {
+ for (auto& succ_val : operand_map->map()) {
+ succ_val.second->incoming = nullptr;
+ succ_val.second->use_vreg = kInvalidVreg;
}
}
}
private:
- LocationMap locations_;
- LocationMap predecessor_intersection_;
+ OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
+ auto to_init = outgoing_maps_[block_index];
+ CHECK(to_init->map().empty());
+ auto block = sequence()->instruction_blocks()[block_index];
+ if (block->predecessors().empty()) return to_init;
+ size_t predecessor_index = block->predecessors()[0].ToSize();
+ // Ensure not a backedge.
+ CHECK(predecessor_index < block->rpo_number().ToSize());
+ auto incoming = outgoing_maps_[predecessor_index];
+ // Copy map and replace values.
+ to_init->map() = incoming->map();
+ for (auto& it : to_init->map()) {
+ auto incoming = it.second;
+ it.second = new (zone()) OperandMap::MapValue();
+ it.second->incoming = incoming;
+ }
+ // Copy to incoming map for second pass.
+ incoming_maps_[block_index]->map() = to_init->map();
+ return to_init;
+ }
- DISALLOW_COPY_AND_ASSIGN(OutgoingMapping);
-};
+ OperandMap* InitializeFromIntersection(size_t block_index) {
+ return incoming_maps_[block_index];
+ }
+ void InitializeOperandMaps() {
+ size_t block_count = sequence()->instruction_blocks().size();
+ incoming_maps_.reserve(block_count);
+ outgoing_maps_.reserve(block_count);
+ for (size_t i = 0; i < block_count; ++i) {
+ incoming_maps_.push_back(new (zone()) OperandMap(zone()));
+ outgoing_maps_.push_back(new (zone()) OperandMap(zone()));
+ }
+ }
-// Verify that all gap moves move the operands for a virtual register into the
-// correct location for every instruction.
-void RegisterAllocatorVerifier::VerifyGapMoves() {
- typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
- OutgoingMappings outgoing_mappings(
- static_cast<int>(sequence()->instruction_blocks().size()), nullptr,
- zone());
- // Construct all mappings, ignoring back edges and multiple entries.
- ConstructOutgoingMappings(&outgoing_mappings, true);
- // Run all remaining phis and compute the intersection of all predecessor
- // mappings.
- for (const auto* block : sequence()->instruction_blocks()) {
- if (block->PredecessorCount() == 0) continue;
- const size_t block_index = block->rpo_number().ToSize();
- auto* mapping = outgoing_mappings[block_index];
- bool initialized = false;
- // Walk predecessors in reverse to ensure Intersect is correctly working.
- // If it did nothing, the second pass would do exactly what the first pass
- // did.
- for (size_t phi_input = block->PredecessorCount() - 1; true; --phi_input) {
- const size_t pred_block_index = block->predecessors()[phi_input].ToSize();
- auto* incoming = outgoing_mappings[pred_block_index];
- if (phi_input != 0) incoming->RunPhis(sequence(), block, phi_input);
- if (!initialized) {
- mapping->InitializeIntersection(incoming);
- initialized = true;
- } else {
- mapping->Intersect(incoming);
+ void InitializePhis() {
+ const size_t block_count = sequence()->instruction_blocks().size();
+ for (size_t block_index = 0; block_index < block_count; ++block_index) {
+ const auto block = sequence()->instruction_blocks()[block_index];
+ for (auto phi : block->phis()) {
+ int first_pred_vreg = phi->operands()[0];
+ const PhiData* first_pred_phi = nullptr;
+ if (IsPhi(first_pred_vreg)) {
+ first_pred_phi = GetPhi(first_pred_vreg);
+ first_pred_vreg = first_pred_phi->first_pred_vreg;
+ }
+ CHECK(!IsPhi(first_pred_vreg));
+ auto phi_data = new (zone()) PhiData(
+ block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
+ auto res =
+ phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
+ CHECK(res.second);
+ phi_map_guard_.Add(phi->virtual_register());
}
- if (phi_input == 0) break;
}
}
- // Construct all mappings again, this time using the instersection mapping
- // above as the incoming mapping instead of the result from the first
- // predecessor.
- ConstructOutgoingMappings(&outgoing_mappings, false);
+
+ typedef ZoneVector<OperandMap*> OperandMaps;
+ typedef ZoneVector<PhiData*> PhiVector;
+
+ Zone* zone() const { return zone_; }
+ const InstructionSequence* sequence() const { return sequence_; }
+
+ Zone* const zone_;
+ const InstructionSequence* const sequence_;
+ BitVector phi_map_guard_;
+ PhiMap phi_map_;
+ OperandMaps incoming_maps_;
+ OperandMaps outgoing_maps_;
+};
+
+
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+ BlockMaps block_maps(zone(), sequence());
+ VerifyGapMoves(&block_maps, true);
+ block_maps.PropagateUsesBackwards();
+ VerifyGapMoves(&block_maps, false);
}
-void RegisterAllocatorVerifier::ConstructOutgoingMappings(
- OutgoingMappings* outgoing_mappings, bool initial_pass) {
- // Compute the locations of all virtual registers leaving every block, using
- // only the first predecessor as source for the input mapping.
- for (const auto* block : sequence()->instruction_blocks()) {
- const size_t block_index = block->rpo_number().ToSize();
- auto* current = outgoing_mappings->at(block_index);
- CHECK(initial_pass == (current == nullptr));
- // Initialize current.
- if (!initial_pass) {
- // Skip check second time around for blocks without multiple predecessors
- // as we have already executed this in the initial run.
- if (block->PredecessorCount() <= 1) continue;
- current->InitializeFromIntersection();
- } else {
- current = new (zone()) OutgoingMapping(zone());
- outgoing_mappings->at(block_index) = current;
- // Copy outgoing values from predecessor block.
- current->InitializeFromFirstPredecessor(sequence(), outgoing_mappings,
- block);
- }
- // Update current with gaps and operands for all instructions in block.
+// Compute and verify outgoing values for every block.
+void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
+ bool initial_pass) {
+ const size_t block_count = sequence()->instruction_blocks().size();
+ for (size_t block_index = 0; block_index < block_count; ++block_index) {
+ auto current = block_maps->InitializeIncoming(block_index, initial_pass);
+ const auto block = sequence()->instruction_blocks()[block_index];
for (int instr_index = block->code_start(); instr_index < block->code_end();
++instr_index) {
const auto& instr_constraint = constraints_[instr_index];
- const auto* instr = instr_constraint.instruction_;
- const auto* op_constraints = instr_constraint.operand_constraints_;
+ const auto instr = instr_constraint.instruction_;
+ if (instr->IsSourcePosition()) continue;
+ if (instr->IsGapMoves()) {
+ current->RunGapInstruction(zone(), GapInstruction::cast(instr));
+ continue;
+ }
+ const auto op_constraints = instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
if (op_constraints[count].type_ == kImmediate) continue;
- auto it = current->locations()->find(instr->InputAt(i));
int virtual_register = op_constraints[count].virtual_register_;
- CHECK(it != current->locations()->end());
- CHECK_EQ(it->second, virtual_register);
+ auto op = instr->InputAt(i);
+ if (!block_maps->IsPhi(virtual_register)) {
+ current->Use(op, virtual_register, initial_pass);
+ } else {
+ auto phi = block_maps->GetPhi(virtual_register);
+ current->UsePhi(op, phi, initial_pass);
+ }
}
for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
current->Drop(instr->TempAt(i));
@@ -443,13 +658,8 @@ void RegisterAllocatorVerifier::ConstructOutgoingMappings(
current->DropRegisters(config());
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
- current->Drop(instr->OutputAt(i));
int virtual_register = op_constraints[count].virtual_register_;
- current->Map(instr->OutputAt(i), virtual_register);
- }
- if (instr->IsGapMoves()) {
- const auto* gap = GapInstruction::cast(instr);
- current->RunGapInstruction(zone(), gap);
+ current->Define(zone(), instr->OutputAt(i), virtual_register);
}
}
}
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 4e35dc2457..86fda1670d 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -5,7 +5,6 @@
#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
-#include "src/v8.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -49,10 +48,9 @@ class RegisterAllocatorVerifier FINAL : public ZoneObject {
OperandConstraint* operand_constraints_;
};
- class OutgoingMapping;
+ class BlockMaps;
typedef ZoneVector<InstructionConstraint> Constraints;
- typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
Zone* zone() const { return zone_; }
const RegisterConfiguration* config() { return config_; }
@@ -68,8 +66,7 @@ class RegisterAllocatorVerifier FINAL : public ZoneObject {
void CheckConstraint(const InstructionOperand* op,
const OperandConstraint* constraint);
- void ConstructOutgoingMappings(OutgoingMappings* outgoing_mappings,
- bool initial_pass);
+ void VerifyGapMoves(BlockMaps* outgoing_mappings, bool initial_pass);
Zone* const zone_;
const RegisterConfiguration* config_;
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 9eb4a470f2..1de5773e7f 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -135,11 +135,12 @@ LiveRange::LiveRange(int id, Zone* zone)
spills_at_definition_(nullptr) {}
-void LiveRange::set_assigned_register(int reg, Zone* zone) {
+void LiveRange::set_assigned_register(int reg,
+ InstructionOperandCache* operand_cache) {
DCHECK(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
// TODO(dcarney): stop aliasing hint operands.
- ConvertUsesToOperand(CreateAssignedOperand(zone));
+ ConvertUsesToOperand(GetAssignedOperand(operand_cache));
}
@@ -183,7 +184,7 @@ void LiveRange::SetSpillOperand(InstructionOperand* operand) {
void LiveRange::SetSpillRange(SpillRange* spill_range) {
DCHECK(HasNoSpillType() || HasSpillRange());
- DCHECK_NE(spill_range, nullptr);
+ DCHECK(spill_range);
spill_type_ = SpillType::kSpillRange;
spill_range_ = spill_range;
}
@@ -250,30 +251,47 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) {
}
-InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) const {
- InstructionOperand* op = nullptr;
+InstructionOperand* LiveRange::GetAssignedOperand(
+ InstructionOperandCache* cache) const {
if (HasRegisterAssigned()) {
DCHECK(!IsSpilled());
switch (Kind()) {
case GENERAL_REGISTERS:
- op = RegisterOperand::Create(assigned_register(), zone);
- break;
+ return cache->RegisterOperand(assigned_register());
case DOUBLE_REGISTERS:
- op = DoubleRegisterOperand::Create(assigned_register(), zone);
- break;
+ return cache->DoubleRegisterOperand(assigned_register());
default:
UNREACHABLE();
}
- } else {
- DCHECK(IsSpilled());
- DCHECK(!HasRegisterAssigned());
- op = TopLevel()->GetSpillOperand();
- DCHECK(!op->IsUnallocated());
}
+ DCHECK(IsSpilled());
+ DCHECK(!HasRegisterAssigned());
+ auto op = TopLevel()->GetSpillOperand();
+ DCHECK(!op->IsUnallocated());
return op;
}
+InstructionOperand LiveRange::GetAssignedOperand() const {
+ if (HasRegisterAssigned()) {
+ DCHECK(!IsSpilled());
+ switch (Kind()) {
+ case GENERAL_REGISTERS:
+ return RegisterOperand(assigned_register());
+ case DOUBLE_REGISTERS:
+ return DoubleRegisterOperand(assigned_register());
+ default:
+ UNREACHABLE();
+ }
+ }
+ DCHECK(IsSpilled());
+ DCHECK(!HasRegisterAssigned());
+ auto op = TopLevel()->GetSpillOperand();
+ DCHECK(!op->IsUnallocated());
+ return *op;
+}
+
+
UseInterval* LiveRange::FirstSearchIntervalForPosition(
LifetimePosition position) const {
if (current_interval_ == nullptr) return first_interval_;
@@ -553,6 +571,18 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
}
+InstructionOperandCache::InstructionOperandCache() {
+ for (size_t i = 0; i < arraysize(general_register_operands_); ++i) {
+ general_register_operands_[i] =
+ i::compiler::RegisterOperand(static_cast<int>(i));
+ }
+ for (size_t i = 0; i < arraysize(double_register_operands_); ++i) {
+ double_register_operands_[i] =
+ i::compiler::DoubleRegisterOperand(static_cast<int>(i));
+ }
+}
+
+
RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
Zone* zone, Frame* frame,
InstructionSequence* code,
@@ -562,7 +592,8 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
code_(code),
debug_name_(debug_name),
config_(config),
- phi_map_(PhiMap::key_compare(), PhiMap::allocator_type(local_zone())),
+ operand_cache_(new (code_zone()) InstructionOperandCache()),
+ phi_map_(local_zone()),
live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()),
live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()),
fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
@@ -572,11 +603,9 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
unhandled_live_ranges_(local_zone()),
active_live_ranges_(local_zone()),
inactive_live_ranges_(local_zone()),
- reusable_slots_(local_zone()),
spill_ranges_(local_zone()),
mode_(UNALLOCATED_REGISTERS),
- num_registers_(-1),
- allocation_ok_(true) {
+ num_registers_(-1) {
DCHECK(this->config()->num_general_registers() <=
RegisterConfiguration::kMaxGeneralRegisters);
DCHECK(this->config()->num_double_registers() <=
@@ -589,7 +618,6 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
static_cast<size_t>(code->VirtualRegisterCount() * 2));
active_live_ranges().reserve(8);
inactive_live_ranges().reserve(8);
- reusable_slots().reserve(8);
spill_ranges().reserve(8);
assigned_registers_ =
new (code_zone()) BitVector(config->num_general_registers(), code_zone());
@@ -898,9 +926,7 @@ void SpillRange::MergeDisjointIntervals(UseInterval* other) {
}
-void RegisterAllocator::ReuseSpillSlots() {
- DCHECK(FLAG_turbo_reuse_spill_slots);
-
+void RegisterAllocator::AssignSpillSlots() {
// Merge disjoint spill ranges
for (size_t i = 0; i < spill_ranges().size(); i++) {
auto range = spill_ranges()[i];
@@ -922,7 +948,7 @@ void RegisterAllocator::ReuseSpillSlots() {
auto op_kind = kind == DOUBLE_REGISTERS
? InstructionOperand::DOUBLE_STACK_SLOT
: InstructionOperand::STACK_SLOT;
- auto op = new (code_zone()) InstructionOperand(op_kind, index);
+ auto op = InstructionOperand::New(code_zone(), op_kind, index);
range->SetOperand(op);
}
}
@@ -933,7 +959,7 @@ void RegisterAllocator::CommitAssignment() {
if (range == nullptr || range->IsEmpty()) continue;
// Register assignments were committed in set_assigned_register.
if (range->HasRegisterAssigned()) continue;
- auto assigned = range->CreateAssignedOperand(code_zone());
+ auto assigned = range->GetAssignedOperand(operand_cache());
range->ConvertUsesToOperand(assigned);
if (range->IsSpilled()) {
range->CommitSpillsAtDefinition(code(), assigned);
@@ -943,7 +969,6 @@ void RegisterAllocator::CommitAssignment() {
SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
- DCHECK(FLAG_turbo_reuse_spill_slots);
auto spill_range = new (local_zone()) SpillRange(range, local_zone());
spill_ranges().push_back(spill_range);
return spill_range;
@@ -951,7 +976,6 @@ SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
- DCHECK(FLAG_turbo_reuse_spill_slots);
if (range->IsChild() || !range->is_phi()) return false;
DCHECK(range->HasNoSpillType());
@@ -1024,7 +1048,6 @@ bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
CHECK(first_op_spill->TryMerge(spill_range));
SpillBetween(range, range->Start(), pos->pos());
- if (!AllocationOk()) return false;
DCHECK(UnhandledIsSorted());
return true;
}
@@ -1043,7 +1066,6 @@ void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
if (i < end) instr = InstructionAt(i + 1);
if (i > start) prev_instr = InstructionAt(i - 1);
MeetConstraintsBetween(prev_instr, instr, i);
- if (!AllocationOk()) return;
}
}
@@ -1069,7 +1091,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
AllocateFixed(output, -1, false);
// This value is produced on the stack, we never need to spill it.
if (output->IsStackSlot()) {
- DCHECK(output->index() < 0);
+ DCHECK(output->index() < frame_->GetSpillSlotCount());
range->SetSpillOperand(output);
range->SetSpillStartIndex(end);
assigned = true;
@@ -1078,15 +1100,14 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
for (auto succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK(successor->PredecessorCount() == 1);
- int gap_index = successor->first_instruction_index() + 1;
+ int gap_index = successor->first_instruction_index();
DCHECK(code()->IsGapAt(gap_index));
// Create an unconstrained operand for the same virtual register
// and insert a gap move from the fixed output to the operand.
UnallocatedOperand* output_copy =
- new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
- output_copy->set_virtual_register(output_vreg);
-
+ UnallocatedOperand(UnallocatedOperand::ANY, output_vreg)
+ .Copy(code_zone());
AddGapMove(gap_index, GapInstruction::START, output, output_copy);
}
}
@@ -1095,7 +1116,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
for (auto succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK(successor->PredecessorCount() == 1);
- int gap_index = successor->first_instruction_index() + 1;
+ int gap_index = successor->first_instruction_index();
range->SpillAtDefinition(local_zone(), gap_index, output);
range->SetSpillStartIndex(gap_index);
}
@@ -1135,7 +1156,7 @@ void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
// This value is produced on the stack, we never need to spill it.
if (first_output->IsStackSlot()) {
- DCHECK(first_output->index() < 0);
+ DCHECK(first_output->index() < frame_->GetSpillSlotCount());
range->SetSpillOperand(first_output);
range->SetSpillStartIndex(gap_index - 1);
assigned = true;
@@ -1351,27 +1372,25 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
for (auto phi : block->phis()) {
- if (FLAG_turbo_reuse_spill_slots) {
- auto res = phi_map_.insert(
- std::make_pair(phi->virtual_register(), PhiMapValue(phi, block)));
- DCHECK(res.second);
- USE(res);
- }
- auto output = phi->output();
int phi_vreg = phi->virtual_register();
+ auto res =
+ phi_map_.insert(std::make_pair(phi_vreg, PhiMapValue(phi, block)));
+ DCHECK(res.second);
+ USE(res);
+ auto& output = phi->output();
if (!FLAG_turbo_delay_ssa_decon) {
for (size_t i = 0; i < phi->operands().size(); ++i) {
InstructionBlock* cur_block =
code()->InstructionBlockAt(block->predecessors()[i]);
AddGapMove(cur_block->last_instruction_index() - 1, GapInstruction::END,
- phi->inputs()[i], output);
+ &phi->inputs()[i], &output);
DCHECK(!InstructionAt(cur_block->last_instruction_index())
->HasPointerMap());
}
}
auto live_range = LiveRangeFor(phi_vreg);
int gap_index = block->first_instruction_index();
- live_range->SpillAtDefinition(local_zone(), gap_index, output);
+ live_range->SpillAtDefinition(local_zone(), gap_index, &output);
live_range->SetSpillStartIndex(gap_index);
// We use the phi-ness of some nodes in some later heuristics.
live_range->set_is_phi(true);
@@ -1407,7 +1426,7 @@ ParallelMove* RegisterAllocator::GetConnectingParallelMove(
}
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE,
+ (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::START,
code_zone());
}
@@ -1435,8 +1454,10 @@ void RegisterAllocator::ConnectRanges() {
}
if (should_insert) {
auto move = GetConnectingParallelMove(pos);
- auto prev_operand = first_range->CreateAssignedOperand(code_zone());
- auto cur_operand = second_range->CreateAssignedOperand(code_zone());
+ auto prev_operand =
+ first_range->GetAssignedOperand(operand_cache());
+ auto cur_operand =
+ second_range->GetAssignedOperand(operand_cache());
move->AddMove(prev_operand, cur_operand, code_zone());
}
}
@@ -1493,7 +1514,7 @@ class LiveRangeBoundArray {
void Initialize(Zone* zone, const LiveRange* const range) {
size_t length = 0;
for (auto i = range; i != nullptr; i = i->next()) length++;
- start_ = zone->NewArray<LiveRangeBound>(static_cast<int>(length));
+ start_ = zone->NewArray<LiveRangeBound>(length);
length_ = length;
auto curr = start_;
for (auto i = range; i != nullptr; i = i->next(), ++curr) {
@@ -1600,15 +1621,16 @@ void RegisterAllocator::ResolveControlFlow() {
auto* block_bound =
finder.ArrayFor(phi->virtual_register())->FindSucc(block);
auto phi_output =
- block_bound->range_->CreateAssignedOperand(code_zone());
- phi->output()->ConvertTo(phi_output->kind(), phi_output->index());
+ block_bound->range_->GetAssignedOperand(operand_cache());
+ phi->output().ConvertTo(phi_output->kind(), phi_output->index());
size_t pred_index = 0;
for (auto pred : block->predecessors()) {
const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
auto* pred_bound = finder.ArrayFor(phi->operands()[pred_index])
->FindPred(pred_block);
- auto pred_op = pred_bound->range_->CreateAssignedOperand(code_zone());
- phi->inputs()[pred_index] = pred_op;
+ auto pred_op =
+ pred_bound->range_->GetAssignedOperand(operand_cache());
+ phi->inputs()[pred_index] = *pred_op;
ResolveControlFlow(block, phi_output, pred_block, pred_op);
pred_index++;
}
@@ -1625,8 +1647,8 @@ void RegisterAllocator::ResolveControlFlow() {
if (result.cur_cover_ == result.pred_cover_ ||
result.cur_cover_->IsSpilled())
continue;
- auto pred_op = result.pred_cover_->CreateAssignedOperand(code_zone());
- auto cur_op = result.cur_cover_->CreateAssignedOperand(code_zone());
+ auto pred_op = result.pred_cover_->GetAssignedOperand(operand_cache());
+ auto cur_op = result.cur_cover_->GetAssignedOperand(operand_cache());
ResolveControlFlow(block, cur_op, pred_block, pred_op);
}
iterator.Advance();
@@ -1848,7 +1870,7 @@ void RegisterAllocator::PopulatePointerMaps() {
"Pointer in register for range %d (start at %d) "
"at safe point %d\n",
cur->id(), cur->Start().Value(), safe_point);
- InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
+ InstructionOperand* operand = cur->GetAssignedOperand(operand_cache());
DCHECK(!operand->IsStackSlot());
map->RecordPointer(operand, code_zone());
}
@@ -1883,7 +1905,6 @@ void RegisterAllocator::AllocateRegisters() {
SortUnhandled();
DCHECK(UnhandledIsSorted());
- DCHECK(reusable_slots().empty());
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
@@ -1932,18 +1953,12 @@ void RegisterAllocator::AllocateRegisters() {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
SpillBetween(current, current->Start(), pos->pos());
- if (!AllocationOk()) return;
DCHECK(UnhandledIsSorted());
continue;
}
}
- if (FLAG_turbo_reuse_spill_slots) {
- if (TryReuseSpillForPhi(current)) {
- continue;
- }
- if (!AllocationOk()) return;
- }
+ if (TryReuseSpillForPhi(current)) continue;
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
auto cur_active = active_live_ranges()[i];
@@ -1970,17 +1985,12 @@ void RegisterAllocator::AllocateRegisters() {
DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
bool result = TryAllocateFreeReg(current);
- if (!AllocationOk()) return;
-
if (!result) AllocateBlockedReg(current);
- if (!AllocationOk()) return;
-
if (current->HasRegisterAssigned()) {
AddToActive(current);
}
}
- reusable_slots().clear();
active_live_ranges().clear();
inactive_live_ranges().clear();
}
@@ -2076,36 +2086,9 @@ bool RegisterAllocator::UnhandledIsSorted() {
}
-void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
- DCHECK(!FLAG_turbo_reuse_spill_slots);
- // Check that we are the last range.
- if (range->next() != nullptr) return;
- if (!range->TopLevel()->HasSpillOperand()) return;
- auto spill_operand = range->TopLevel()->GetSpillOperand();
- if (spill_operand->IsConstant()) return;
- if (spill_operand->index() >= 0) {
- reusable_slots().push_back(range);
- }
-}
-
-
-InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
- DCHECK(!FLAG_turbo_reuse_spill_slots);
- if (reusable_slots().empty()) return nullptr;
- if (reusable_slots().front()->End().Value() >
- range->TopLevel()->Start().Value()) {
- return nullptr;
- }
- auto result = reusable_slots().front()->TopLevel()->GetSpillOperand();
- reusable_slots().erase(reusable_slots().begin());
- return result;
-}
-
-
void RegisterAllocator::ActiveToHandled(LiveRange* range) {
RemoveElement(&active_live_ranges(), range);
TraceAlloc("Moving live range %d from active to handled\n", range->id());
- if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
}
@@ -2119,7 +2102,6 @@ void RegisterAllocator::ActiveToInactive(LiveRange* range) {
void RegisterAllocator::InactiveToHandled(LiveRange* range) {
RemoveElement(&inactive_live_ranges(), range);
TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
- if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
}
@@ -2186,7 +2168,6 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
// Register reg is available at the range start but becomes blocked before
// the range end. Split current at position where it becomes blocked.
auto tail = SplitRangeAt(current, pos);
- if (!AllocationOk()) return false;
AddToUnhandledSorted(tail);
}
@@ -2267,7 +2248,6 @@ void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
// position.
LiveRange* tail = SplitBetween(current, current->Start(),
block_pos[reg].InstructionStart());
- if (!AllocationOk()) return;
AddToUnhandledSorted(tail);
}
@@ -2346,7 +2326,6 @@ void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
// current live-range is larger than their end.
SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
}
- if (!AllocationOk()) return;
ActiveToHandled(range);
--i;
}
@@ -2365,7 +2344,6 @@ void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
next_intersection = Min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection);
}
- if (!AllocationOk()) return;
InactiveToHandled(range);
--i;
}
@@ -2376,7 +2354,8 @@ void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
- InstructionAt(pos.InstructionIndex())->IsBlockStart();
+ code()->GetInstructionBlock(pos.InstructionIndex())->code_start() ==
+ pos.InstructionIndex();
}
@@ -2393,7 +2372,6 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
!InstructionAt(pos.InstructionIndex())->IsControl());
int vreg = GetVirtualRegister();
- if (!AllocationOk()) return nullptr;
auto result = LiveRangeFor(vreg);
range->SplitAt(pos, result, local_zone());
return result;
@@ -2451,7 +2429,6 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
auto second_part = SplitRangeAt(range, pos);
- if (!AllocationOk()) return;
Spill(second_part);
}
@@ -2468,16 +2445,18 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition end) {
CHECK(start.Value() < end.Value());
auto second_part = SplitRangeAt(range, start);
- if (!AllocationOk()) return;
if (second_part->Start().Value() < end.Value()) {
// The split result intersects with [start, end[.
// Split it at position between ]start+1, end[, spill the middle part
// and put the rest to unhandled.
+ auto third_part_end = end.PrevInstruction().InstructionEnd();
+ if (IsBlockBoundary(end.InstructionStart())) {
+ third_part_end = end.InstructionStart();
+ }
auto third_part = SplitBetween(
second_part, Max(second_part->Start().InstructionEnd(), until),
- end.PrevInstruction().InstructionEnd());
- if (!AllocationOk()) return;
+ third_part_end);
DCHECK(third_part != second_part);
@@ -2496,21 +2475,7 @@ void RegisterAllocator::Spill(LiveRange* range) {
TraceAlloc("Spilling live range %d\n", range->id());
auto first = range->TopLevel();
if (first->HasNoSpillType()) {
- if (FLAG_turbo_reuse_spill_slots) {
- AssignSpillRangeToLiveRange(first);
- } else {
- auto op = TryReuseSpillSlot(range);
- if (op == nullptr) {
- // Allocate a new operand referring to the spill slot.
- RegisterKind kind = range->Kind();
- int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
- auto op_kind = kind == DOUBLE_REGISTERS
- ? InstructionOperand::DOUBLE_STACK_SLOT
- : InstructionOperand::STACK_SLOT;
- op = new (code_zone()) InstructionOperand(op_kind, index);
- }
- first->SetSpillOperand(op);
- }
+ AssignSpillRangeToLiveRange(first);
}
range->MakeSpilled();
}
@@ -2540,7 +2505,7 @@ void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
DCHECK(range->Kind() == GENERAL_REGISTERS);
assigned_registers_->Add(reg);
}
- range->set_assigned_register(reg, code_zone());
+ range->set_assigned_register(reg, operand_cache());
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index b17837ba59..d7dd1b7358 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -173,6 +173,33 @@ class UsePosition FINAL : public ZoneObject {
class SpillRange;
+
+// TODO(dcarney): remove this cache.
+class InstructionOperandCache FINAL : public ZoneObject {
+ public:
+ InstructionOperandCache();
+
+ InstructionOperand* RegisterOperand(int index) {
+ DCHECK(index >= 0 &&
+ index < static_cast<int>(arraysize(general_register_operands_)));
+ return &general_register_operands_[index];
+ }
+ InstructionOperand* DoubleRegisterOperand(int index) {
+ DCHECK(index >= 0 &&
+ index < static_cast<int>(arraysize(double_register_operands_)));
+ return &double_register_operands_[index];
+ }
+
+ private:
+ InstructionOperand
+ general_register_operands_[RegisterConfiguration::kMaxGeneralRegisters];
+ InstructionOperand
+ double_register_operands_[RegisterConfiguration::kMaxDoubleRegisters];
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionOperandCache);
+};
+
+
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
class LiveRange FINAL : public ZoneObject {
@@ -193,10 +220,12 @@ class LiveRange FINAL : public ZoneObject {
int id() const { return id_; }
bool IsFixed() const { return id_ < 0; }
bool IsEmpty() const { return first_interval() == nullptr; }
- InstructionOperand* CreateAssignedOperand(Zone* zone) const;
+ // TODO(dcarney): remove this.
+ InstructionOperand* GetAssignedOperand(InstructionOperandCache* cache) const;
+ InstructionOperand GetAssignedOperand() const;
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, Zone* zone);
+ void set_assigned_register(int reg, InstructionOperandCache* cache);
void MakeSpilled();
bool is_phi() const { return is_phi_; }
void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
@@ -377,8 +406,6 @@ class RegisterAllocator FINAL : public ZoneObject {
InstructionSequence* code,
const char* debug_name = nullptr);
- bool AllocationOk() { return allocation_ok_; }
-
const ZoneVector<LiveRange*>& live_ranges() const { return live_ranges_; }
const ZoneVector<LiveRange*>& fixed_live_ranges() const {
return fixed_live_ranges_;
@@ -405,8 +432,8 @@ class RegisterAllocator FINAL : public ZoneObject {
void AllocateGeneralRegisters();
void AllocateDoubleRegisters();
- // Phase 5: reassign spill splots for maximal reuse.
- void ReuseSpillSlots();
+ // Phase 5: assign spill splots.
+ void AssignSpillSlots();
// Phase 6: commit assignment.
void CommitAssignment();
@@ -421,15 +448,7 @@ class RegisterAllocator FINAL : public ZoneObject {
void ResolveControlFlow();
private:
- int GetVirtualRegister() {
- int vreg = code()->NextVirtualRegister();
- if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
- allocation_ok_ = false;
- // Maintain the invariant that we return something below the maximum.
- return 0;
- }
- return vreg;
- }
+ int GetVirtualRegister() { return code()->NextVirtualRegister(); }
// Checks whether the value of a given virtual register is a reference.
// TODO(titzer): rename this to IsReference.
@@ -494,8 +513,6 @@ class RegisterAllocator FINAL : public ZoneObject {
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
- void FreeSpillSlot(LiveRange* range);
- InstructionOperand* TryReuseSpillSlot(LiveRange* range);
// Live range splitting helpers.
@@ -570,6 +587,7 @@ class RegisterAllocator FINAL : public ZoneObject {
Frame* frame() const { return frame_; }
const char* debug_name() const { return debug_name_; }
const RegisterConfiguration* config() const { return config_; }
+ InstructionOperandCache* operand_cache() const { return operand_cache_; }
ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
ZoneVector<LiveRange*>& fixed_live_ranges() { return fixed_live_ranges_; }
ZoneVector<LiveRange*>& fixed_double_live_ranges() {
@@ -582,7 +600,6 @@ class RegisterAllocator FINAL : public ZoneObject {
ZoneVector<LiveRange*>& inactive_live_ranges() {
return inactive_live_ranges_;
}
- ZoneVector<LiveRange*>& reusable_slots() { return reusable_slots_; }
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
struct PhiMapValue {
@@ -591,8 +608,7 @@ class RegisterAllocator FINAL : public ZoneObject {
PhiInstruction* const phi;
const InstructionBlock* const block;
};
- typedef std::map<int, PhiMapValue, std::less<int>,
- zone_allocator<std::pair<int, PhiMapValue>>> PhiMap;
+ typedef ZoneMap<int, PhiMapValue> PhiMap;
Zone* const local_zone_;
Frame* const frame_;
@@ -600,7 +616,7 @@ class RegisterAllocator FINAL : public ZoneObject {
const char* const debug_name_;
const RegisterConfiguration* config_;
-
+ InstructionOperandCache* const operand_cache_;
PhiMap phi_map_;
// During liveness analysis keep a mapping from block id to live_in sets
@@ -616,7 +632,6 @@ class RegisterAllocator FINAL : public ZoneObject {
ZoneVector<LiveRange*> unhandled_live_ranges_;
ZoneVector<LiveRange*> active_live_ranges_;
ZoneVector<LiveRange*> inactive_live_ranges_;
- ZoneVector<LiveRange*> reusable_slots_;
ZoneVector<SpillRange*> spill_ranges_;
RegisterKind mode_;
@@ -625,9 +640,6 @@ class RegisterAllocator FINAL : public ZoneObject {
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
- // Indicates success or failure during register allocation.
- bool allocation_ok_;
-
#ifdef DEBUG
LifetimePosition allocation_finger_;
#endif
diff --git a/deps/v8/src/compiler/register-configuration.cc b/deps/v8/src/compiler/register-configuration.cc
index e7d8bbd607..30946fc373 100644
--- a/deps/v8/src/compiler/register-configuration.cc
+++ b/deps/v8/src/compiler/register-configuration.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/register-configuration.h"
+#include "src/globals.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/register-configuration.h b/deps/v8/src/compiler/register-configuration.h
index 8178ba2f0d..f0d58735ba 100644
--- a/deps/v8/src/compiler/register-configuration.h
+++ b/deps/v8/src/compiler/register-configuration.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
#define V8_COMPILER_REGISTER_CONFIGURATION_H_
-#include "src/v8.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 8720afdde3..6ec4b86bb4 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -10,7 +10,6 @@
#include "src/base/bits.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
@@ -31,8 +30,7 @@ class RepresentationChanger {
type_error_(false) {}
// TODO(titzer): should Word64 also be implicitly convertable to others?
- static const MachineTypeUnion rWord =
- kRepBit | kRepWord8 | kRepWord16 | kRepWord32;
+ static const MachineTypeUnion rWord = kRepWord8 | kRepWord16 | kRepWord32;
Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
MachineTypeUnion use_type) {
@@ -262,8 +260,10 @@ class RepresentationChanger {
break;
}
// Select the correct X -> Word32 operator.
- const Operator* op = NULL;
- if (output_type & kRepFloat64) {
+ const Operator* op;
+ if (output_type & kRepBit) {
+ return node; // Sloppy comparison -> word32
+ } else if (output_type & kRepFloat64) {
if (output_type & kTypeUint32 || use_unsigned) {
op = machine()->ChangeFloat64ToUint32();
} else {
@@ -291,35 +291,19 @@ class RepresentationChanger {
Node* GetBitRepresentationFor(Node* node, MachineTypeUnion output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
- case IrOpcode::kInt32Constant: {
- int32_t value = OpParameter<int32_t>(node);
- if (value == 0 || value == 1) return node;
- return jsgraph()->Int32Constant(1); // value != 0
- }
- case IrOpcode::kNumberConstant: {
- double value = OpParameter<double>(node);
- if (std::isnan(value) || value == 0.0) {
- return jsgraph()->Int32Constant(0);
- }
- return jsgraph()->Int32Constant(1);
- }
case IrOpcode::kHeapConstant: {
- Handle<Object> handle = OpParameter<Unique<Object> >(node).handle();
- DCHECK(*handle == isolate()->heap()->true_value() ||
- *handle == isolate()->heap()->false_value());
+ Handle<Object> value = OpParameter<Unique<Object> >(node).handle();
+ DCHECK(value.is_identical_to(factory()->true_value()) ||
+ value.is_identical_to(factory()->false_value()));
return jsgraph()->Int32Constant(
- *handle == isolate()->heap()->true_value() ? 1 : 0);
+ value.is_identical_to(factory()->true_value()) ? 1 : 0);
}
default:
break;
}
// Select the correct X -> Bit operator.
const Operator* op;
- if (output_type & rWord) {
- return node; // No change necessary.
- } else if (output_type & kRepWord64) {
- return node; // TODO(titzer): No change necessary, on 64-bit.
- } else if (output_type & kRepTagged) {
+ if (output_type & kRepTagged) {
op = simplified()->ChangeBoolToBit();
} else {
return TypeError(node, output_type, kRepBit);
@@ -464,8 +448,9 @@ class RepresentationChanger {
node);
}
- JSGraph* jsgraph() { return jsgraph_; }
- Isolate* isolate() { return isolate_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return isolate_; }
+ Factory* factory() const { return isolate()->factory(); }
SimplifiedOperatorBuilder* simplified() { return simplified_; }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
};
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 30bfbc8ecf..8924ae5440 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/schedule.h"
+
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/schedule.h"
#include "src/ostreams.h"
namespace v8 {
@@ -17,13 +17,13 @@ BasicBlock::BasicBlock(Zone* zone, Id id)
rpo_number_(-1),
deferred_(false),
dominator_depth_(-1),
- dominator_(NULL),
- rpo_next_(NULL),
- loop_header_(NULL),
- loop_end_(NULL),
+ dominator_(nullptr),
+ rpo_next_(nullptr),
+ loop_header_(nullptr),
+ loop_end_(nullptr),
loop_depth_(0),
control_(kNone),
- control_input_(NULL),
+ control_input_(nullptr),
nodes_(zone),
successors_(zone),
predecessors_(zone),
@@ -81,6 +81,19 @@ void BasicBlock::set_loop_header(BasicBlock* loop_header) {
}
+// static
+BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+ while (b1 != b2) {
+ if (b1->dominator_depth() < b2->dominator_depth()) {
+ b2 = b2->dominator();
+ } else {
+ b1 = b1->dominator();
+ }
+ }
+ return b1;
+}
+
+
std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
switch (c) {
case BasicBlock::kNone:
@@ -89,6 +102,8 @@ std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
return os << "goto";
case BasicBlock::kBranch:
return os << "branch";
+ case BasicBlock::kSwitch:
+ return os << "switch";
case BasicBlock::kReturn:
return os << "return";
case BasicBlock::kThrow:
@@ -196,6 +211,18 @@ void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
}
+void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
+ size_t succ_count) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ block->set_control(BasicBlock::kSwitch);
+ for (size_t index = 0; index < succ_count; ++index) {
+ AddSuccessor(block, succ_blocks[index]);
+ }
+ SetControlInput(block, sw);
+}
+
+
void Schedule::AddReturn(BasicBlock* block, Node* input) {
DCHECK(block->control() == BasicBlock::kNone);
block->set_control(BasicBlock::kReturn);
@@ -221,13 +248,30 @@ void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
MoveSuccessors(block, end);
AddSuccessor(block, tblock);
AddSuccessor(block, fblock);
- if (block->control_input() != NULL) {
+ if (block->control_input() != nullptr) {
SetControlInput(end, block->control_input());
}
SetControlInput(block, branch);
}
+void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
+ BasicBlock** succ_blocks, size_t succ_count) {
+ DCHECK_NE(BasicBlock::kNone, block->control());
+ DCHECK_EQ(BasicBlock::kNone, end->control());
+ end->set_control(block->control());
+ block->set_control(BasicBlock::kSwitch);
+ MoveSuccessors(block, end);
+ for (size_t index = 0; index < succ_count; ++index) {
+ AddSuccessor(block, succ_blocks[index]);
+ }
+ if (block->control_input() != nullptr) {
+ SetControlInput(end, block->control_input());
+ }
+ SetControlInput(block, sw);
+}
+
+
void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
block->AddSuccessor(succ);
succ->AddPredecessor(block);
@@ -235,13 +279,10 @@ void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
void Schedule::MoveSuccessors(BasicBlock* from, BasicBlock* to) {
- for (BasicBlock::Predecessors::iterator i = from->successors_begin();
- i != from->successors_end(); ++i) {
- BasicBlock* succ = *i;
- to->AddSuccessor(succ);
- for (BasicBlock::Predecessors::iterator j = succ->predecessors_begin();
- j != succ->predecessors_end(); ++j) {
- if (*j == from) *j = to;
+ for (BasicBlock* const successor : from->successors()) {
+ to->AddSuccessor(successor);
+ for (BasicBlock*& predecessor : successor->predecessors()) {
+ if (predecessor == from) predecessor = to;
}
}
from->ClearSuccessors();
@@ -264,24 +305,18 @@ void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
std::ostream& operator<<(std::ostream& os, const Schedule& s) {
- // TODO(svenpanne) Const-correct the RPO stuff/iterators.
- BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order();
- for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
- BasicBlock* block = *i;
+ for (BasicBlock* block : *s.rpo_order()) {
os << "--- BLOCK B" << block->id();
if (block->deferred()) os << " (deferred)";
if (block->PredecessorCount() != 0) os << " <- ";
bool comma = false;
- for (BasicBlock::Predecessors::iterator j = block->predecessors_begin();
- j != block->predecessors_end(); ++j) {
+ for (BasicBlock const* predecessor : block->predecessors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << (*j)->id();
+ os << "B" << predecessor->id();
}
os << " ---\n";
- for (BasicBlock::const_iterator j = block->begin(); j != block->end();
- ++j) {
- Node* node = *j;
+ for (Node* node : *block) {
os << " " << *node;
if (NodeProperties::IsTyped(node)) {
Bounds bounds = NodeProperties::GetBounds(node);
@@ -304,11 +339,10 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
}
os << " -> ";
comma = false;
- for (BasicBlock::Successors::iterator j = block->successors_begin();
- j != block->successors_end(); ++j) {
+ for (BasicBlock const* successor : block->successors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << (*j)->id();
+ os << "B" << successor->id();
}
os << "\n";
}
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 0bba689785..d4d64533b5 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -6,23 +6,22 @@
#define V8_COMPILER_SCHEDULE_H_
#include <iosfwd>
-#include <vector>
-#include "src/v8.h"
-
-#include "src/compiler/node.h"
-#include "src/compiler/opcodes.h"
-#include "src/zone.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
class BasicBlock;
class BasicBlockInstrumentor;
-class Graph;
-class ConstructScheduleData;
-class CodeGenerator; // Because of a namespace bug in clang.
+class Node;
+
+
+typedef ZoneVector<BasicBlock*> BasicBlockVector;
+typedef ZoneVector<Node*> NodeVector;
+
// A basic block contains an ordered list of nodes and ends with a control
// node. Note that if a basic block has phis, then all phis must appear as the
@@ -34,6 +33,7 @@ class BasicBlock FINAL : public ZoneObject {
kNone, // Control not initialized yet.
kGoto, // Goto a single successor block.
kBranch, // Branch if true to first successor, otherwise second.
+ kSwitch, // Table dispatch to one of the successor blocks.
kReturn, // Return a value from this method.
kThrow // Throw an exception.
};
@@ -83,39 +83,32 @@ class BasicBlock FINAL : public ZoneObject {
Id id() const { return id_; }
- // Predecessors and successors.
- typedef ZoneVector<BasicBlock*> Predecessors;
- Predecessors::iterator predecessors_begin() { return predecessors_.begin(); }
- Predecessors::iterator predecessors_end() { return predecessors_.end(); }
- Predecessors::const_iterator predecessors_begin() const {
- return predecessors_.begin();
- }
- Predecessors::const_iterator predecessors_end() const {
- return predecessors_.end();
- }
+ // Predecessors.
+ BasicBlockVector& predecessors() { return predecessors_; }
+ const BasicBlockVector& predecessors() const { return predecessors_; }
size_t PredecessorCount() const { return predecessors_.size(); }
BasicBlock* PredecessorAt(size_t index) { return predecessors_[index]; }
void ClearPredecessors() { predecessors_.clear(); }
void AddPredecessor(BasicBlock* predecessor);
- typedef ZoneVector<BasicBlock*> Successors;
- Successors::iterator successors_begin() { return successors_.begin(); }
- Successors::iterator successors_end() { return successors_.end(); }
- Successors::const_iterator successors_begin() const {
- return successors_.begin();
- }
- Successors::const_iterator successors_end() const {
- return successors_.end();
- }
+ // Successors.
+ BasicBlockVector& successors() { return successors_; }
+ const BasicBlockVector& successors() const { return successors_; }
size_t SuccessorCount() const { return successors_.size(); }
BasicBlock* SuccessorAt(size_t index) { return successors_[index]; }
void ClearSuccessors() { successors_.clear(); }
void AddSuccessor(BasicBlock* successor);
// Nodes in the basic block.
+ typedef Node* value_type;
+ bool empty() const { return nodes_.empty(); }
+ size_t size() const { return nodes_.size(); }
Node* NodeAt(size_t index) { return nodes_[index]; }
size_t NodeCount() const { return nodes_.size(); }
+ value_type& front() { return nodes_.front(); }
+ value_type const& front() const { return nodes_.front(); }
+
typedef NodeVector::iterator iterator;
iterator begin() { return nodes_.begin(); }
iterator end() { return nodes_.end(); }
@@ -174,6 +167,10 @@ class BasicBlock FINAL : public ZoneObject {
inline bool IsLoopHeader() const { return loop_end_ != NULL; }
bool LoopContains(BasicBlock* block) const;
+ // Computes the immediate common dominator of {b1} and {b2}. The worst time
+ // complexity is O(N) where N is the height of the dominator tree.
+ static BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+
private:
int32_t loop_number_; // loop number of the block.
int32_t rpo_number_; // special RPO number of the block.
@@ -191,20 +188,17 @@ class BasicBlock FINAL : public ZoneObject {
Node* control_input_; // Input value for control.
NodeVector nodes_; // nodes of this block in forward order.
- Successors successors_;
- Predecessors predecessors_;
+ BasicBlockVector successors_;
+ BasicBlockVector predecessors_;
Id id_;
DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
-std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c);
-std::ostream& operator<<(std::ostream& os, const BasicBlock::Id& id);
-std::ostream& operator<<(std::ostream& os, const BasicBlock::RpoNumber& rpo);
+std::ostream& operator<<(std::ostream&, const BasicBlock::Control&);
+std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
+std::ostream& operator<<(std::ostream&, const BasicBlock::RpoNumber&);
-typedef ZoneVector<BasicBlock*> BasicBlockVector;
-typedef BasicBlockVector::iterator BasicBlockVectorIter;
-typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
// A schedule represents the result of assigning nodes to basic blocks
// and ordering them within basic blocks. Prior to computing a schedule,
@@ -243,6 +237,10 @@ class Schedule FINAL : public ZoneObject {
void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock);
+ // BasicBlock building: add a switch at the end of {block}.
+ void AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
+ size_t succ_count);
+
// BasicBlock building: add a return at the end of {block}.
void AddReturn(BasicBlock* block, Node* input);
@@ -253,6 +251,10 @@ class Schedule FINAL : public ZoneObject {
void InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock);
+ // BasicBlock mutation: insert a switch into the end of {block}.
+ void InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
+ BasicBlock** succ_blocks, size_t succ_count);
+
// Exposed publicly for testing only.
void AddSuccessorForTesting(BasicBlock* block, BasicBlock* succ) {
return AddSuccessor(block, succ);
@@ -286,7 +288,7 @@ class Schedule FINAL : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(Schedule);
};
-std::ostream& operator<<(std::ostream& os, const Schedule& s);
+std::ostream& operator<<(std::ostream&, const Schedule&);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index f12c6318d3..6e105e3713 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -2,18 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <deque>
-#include <queue>
-
#include "src/compiler/scheduler.h"
#include "src/bit-vector.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -29,20 +27,21 @@ static inline void Trace(const char* msg, ...) {
}
-Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
: zone_(zone),
graph_(graph),
schedule_(schedule),
+ flags_(flags),
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
-Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph) {
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
Schedule* schedule = new (graph->zone())
Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
- Scheduler scheduler(zone, graph, schedule);
+ Scheduler scheduler(zone, graph, schedule, flags);
scheduler.BuildCFG();
scheduler.ComputeSpecialRPONumbering();
@@ -75,7 +74,8 @@ Scheduler::Placement Scheduler::GetPlacement(Node* node) {
if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
switch (node->opcode()) {
case IrOpcode::kParameter:
- // Parameters are always fixed to the start node.
+ case IrOpcode::kOsrValue:
+ // Parameters and OSR values are always fixed to the start block.
data->placement_ = kFixed;
break;
case IrOpcode::kPhi:
@@ -126,11 +126,10 @@ void Scheduler::UpdatePlacement(Node* node, Placement placement) {
#undef DEFINE_CONTROL_CASE
{
// Control nodes force coupled uses to be placed.
- Node::Uses uses = node->uses();
- for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
- if (GetPlacement(*i) == Scheduler::kCoupled) {
- DCHECK_EQ(node, NodeProperties::GetControlInput(*i));
- UpdatePlacement(*i, placement);
+ for (auto use : node->uses()) {
+ if (GetPlacement(use) == Scheduler::kCoupled) {
+ DCHECK_EQ(node, NodeProperties::GetControlInput(use));
+ UpdatePlacement(use, placement);
}
}
break;
@@ -208,20 +207,6 @@ void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
}
-BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
- while (b1 != b2) {
- int32_t b1_depth = b1->dominator_depth();
- int32_t b2_depth = b2->dominator_depth();
- if (b1_depth < b2_depth) {
- b2 = b2->dominator();
- } else {
- b1 = b1->dominator();
- }
- }
- return b1;
-}
-
-
// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.
@@ -233,7 +218,8 @@ BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
class CFGBuilder : public ZoneObject {
public:
CFGBuilder(Zone* zone, Scheduler* scheduler)
- : scheduler_(scheduler),
+ : zone_(zone),
+ scheduler_(scheduler),
schedule_(scheduler->schedule_),
queued_(scheduler->graph_, 2),
queue_(zone),
@@ -282,7 +268,7 @@ class CFGBuilder : public ZoneObject {
// single-exit region that makes up a minimal component to be scheduled.
if (IsSingleEntrySingleExitRegion(node, exit)) {
Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
- DCHECK_EQ(NULL, component_entry_);
+ DCHECK(!component_entry_);
component_entry_ = node;
continue;
}
@@ -292,7 +278,7 @@ class CFGBuilder : public ZoneObject {
Queue(node->InputAt(i));
}
}
- DCHECK_NE(NULL, component_entry_);
+ DCHECK(component_entry_);
for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
ConnectBlocks(*i); // Connect block to its predecessor/successors.
@@ -330,15 +316,9 @@ class CFGBuilder : public ZoneObject {
case IrOpcode::kMerge:
BuildBlockForNode(node);
break;
- case IrOpcode::kTerminate: {
- // Put Terminate in the loop to which it refers.
- Node* loop = NodeProperties::GetControlInput(node);
- BasicBlock* block = BuildBlockForNode(loop);
- FixNode(block, node);
- break;
- }
case IrOpcode::kBranch:
- BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
+ case IrOpcode::kSwitch:
+ BuildBlocksForSuccessors(node);
break;
default:
break;
@@ -355,10 +335,18 @@ class CFGBuilder : public ZoneObject {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectBranch(node);
break;
+ case IrOpcode::kSwitch:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectSwitch(node);
+ break;
case IrOpcode::kReturn:
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectReturn(node);
break;
+ case IrOpcode::kThrow:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectThrow(node);
+ break;
default:
break;
}
@@ -375,49 +363,28 @@ class CFGBuilder : public ZoneObject {
return block;
}
- void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
- IrOpcode::Value b) {
- Node* successors[2];
- CollectSuccessorProjections(node, successors, a, b);
- BuildBlockForNode(successors[0]);
- BuildBlockForNode(successors[1]);
- }
-
- // Collect the branch-related projections from a node, such as IfTrue,
- // IfFalse.
- // TODO(titzer): consider moving this to node.h
- void CollectSuccessorProjections(Node* node, Node** buffer,
- IrOpcode::Value true_opcode,
- IrOpcode::Value false_opcode) {
- buffer[0] = NULL;
- buffer[1] = NULL;
- for (Node* use : node->uses()) {
- if (use->opcode() == true_opcode) {
- DCHECK_EQ(NULL, buffer[0]);
- buffer[0] = use;
- }
- if (use->opcode() == false_opcode) {
- DCHECK_EQ(NULL, buffer[1]);
- buffer[1] = use;
- }
+ void BuildBlocksForSuccessors(Node* node) {
+ size_t const successor_cnt = node->op()->ControlOutputCount();
+ Node** successors = zone_->NewArray<Node*>(successor_cnt);
+ NodeProperties::CollectControlProjections(node, successors, successor_cnt);
+ for (size_t index = 0; index < successor_cnt; ++index) {
+ BuildBlockForNode(successors[index]);
}
- DCHECK_NE(NULL, buffer[0]);
- DCHECK_NE(NULL, buffer[1]);
}
- void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
- IrOpcode::Value true_opcode,
- IrOpcode::Value false_opcode) {
- Node* successors[2];
- CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
- buffer[0] = schedule_->block(successors[0]);
- buffer[1] = schedule_->block(successors[1]);
+ void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
+ size_t successor_cnt) {
+ Node** successors = reinterpret_cast<Node**>(successor_blocks);
+ NodeProperties::CollectControlProjections(node, successors, successor_cnt);
+ for (size_t index = 0; index < successor_cnt; ++index) {
+ successor_blocks[index] = schedule_->block(successors[index]);
+ }
}
void ConnectBranch(Node* branch) {
BasicBlock* successor_blocks[2];
- CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
- IrOpcode::kIfFalse);
+ CollectSuccessorBlocks(branch, successor_blocks,
+ arraysize(successor_blocks));
// Consider branch hints.
switch (BranchHintOf(branch->op())) {
@@ -439,7 +406,7 @@ class CFGBuilder : public ZoneObject {
} else {
Node* branch_block_node = NodeProperties::GetControlInput(branch);
BasicBlock* branch_block = schedule_->block(branch_block_node);
- DCHECK(branch_block != NULL);
+ DCHECK_NOT_NULL(branch_block);
TraceConnect(branch, branch_block, successor_blocks[0]);
TraceConnect(branch, branch_block, successor_blocks[1]);
@@ -448,12 +415,36 @@ class CFGBuilder : public ZoneObject {
}
}
+ void ConnectSwitch(Node* sw) {
+ size_t const successor_count = sw->op()->ControlOutputCount();
+ BasicBlock** successor_blocks =
+ zone_->NewArray<BasicBlock*>(successor_count);
+ CollectSuccessorBlocks(sw, successor_blocks, successor_count);
+
+ if (sw == component_entry_) {
+ for (size_t index = 0; index < successor_count; ++index) {
+ TraceConnect(sw, component_start_, successor_blocks[index]);
+ }
+ schedule_->InsertSwitch(component_start_, component_end_, sw,
+ successor_blocks, successor_count);
+ } else {
+ Node* sw_block_node = NodeProperties::GetControlInput(sw);
+ BasicBlock* sw_block = schedule_->block(sw_block_node);
+ DCHECK_NOT_NULL(sw_block);
+
+ for (size_t index = 0; index < successor_count; ++index) {
+ TraceConnect(sw, sw_block, successor_blocks[index]);
+ }
+ schedule_->AddSwitch(sw_block, sw, successor_blocks, successor_count);
+ }
+ }
+
void ConnectMerge(Node* merge) {
// Don't connect the special merge at the end to its predecessors.
if (IsFinalMerge(merge)) return;
BasicBlock* block = schedule_->block(merge);
- DCHECK(block != NULL);
+ DCHECK_NOT_NULL(block);
// For all of the merge's control inputs, add a goto at the end to the
// merge's basic block.
for (Node* const input : merge->inputs()) {
@@ -470,8 +461,15 @@ class CFGBuilder : public ZoneObject {
schedule_->AddReturn(return_block, ret);
}
+ void ConnectThrow(Node* thr) {
+ Node* throw_block_node = NodeProperties::GetControlInput(thr);
+ BasicBlock* throw_block = schedule_->block(throw_block_node);
+ TraceConnect(thr, throw_block, NULL);
+ schedule_->AddThrow(throw_block, thr);
+ }
+
void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
- DCHECK_NE(NULL, block);
+ DCHECK_NOT_NULL(block);
if (succ == NULL) {
Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
block->id().ToInt());
@@ -498,6 +496,7 @@ class CFGBuilder : public ZoneObject {
DCHECK(control_.empty());
}
+ Zone* zone_;
Scheduler* scheduler_;
Schedule* schedule_;
NodeMarker<bool> queued_; // Mark indicating whether node is queued.
@@ -556,7 +555,7 @@ class SpecialRPONumberer : public ZoneObject {
// that is for the graph spanned between the schedule's start and end blocks.
void ComputeSpecialRPO() {
DCHECK(schedule_->end()->SuccessorCount() == 0);
- DCHECK_EQ(NULL, order_); // Main order does not exist yet.
+ DCHECK(!order_); // Main order does not exist yet.
ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
}
@@ -564,7 +563,7 @@ class SpecialRPONumberer : public ZoneObject {
// that is for the graph spanned between the given {entry} and {end} blocks,
// then updates the existing ordering with this new information.
void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
- DCHECK_NE(NULL, order_); // Main order to be updated is present.
+ DCHECK(order_); // Main order to be updated is present.
ComputeAndInsertSpecialRPO(entry, end);
}
@@ -1032,8 +1031,8 @@ void Scheduler::ComputeSpecialRPONumbering() {
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
for (/*nop*/; block != NULL; block = block->rpo_next()) {
- BasicBlock::Predecessors::iterator pred = block->predecessors_begin();
- BasicBlock::Predecessors::iterator end = block->predecessors_end();
+ auto pred = block->predecessors().begin();
+ auto end = block->predecessors().end();
DCHECK(pred != end); // All blocks except start have predecessors.
BasicBlock* dominator = *pred;
// For multiple predecessors, walk up the dominator tree until a common
@@ -1042,7 +1041,7 @@ void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
for (++pred; pred != end; ++pred) {
// Don't examine backwards edges.
if ((*pred)->dominator_depth() < 0) continue;
- dominator = GetCommonDominator(dominator, *pred);
+ dominator = BasicBlock::GetCommonDominator(dominator, *pred);
}
block->set_dominator(dominator);
block->set_dominator_depth(dominator->dominator_depth() + 1);
@@ -1069,7 +1068,7 @@ void Scheduler::GenerateImmediateDominatorTree() {
// Phase 3: Prepare use counts for nodes.
-class PrepareUsesVisitor : public NullNodeVisitor {
+class PrepareUsesVisitor {
public:
explicit PrepareUsesVisitor(Scheduler* scheduler)
: scheduler_(scheduler), schedule_(scheduler->schedule_) {}
@@ -1112,10 +1111,29 @@ class PrepareUsesVisitor : public NullNodeVisitor {
void Scheduler::PrepareUses() {
Trace("--- PREPARE USES -------------------------------------------\n");
- // Count the uses of every node, it will be used to ensure that all of a
+ // Count the uses of every node, which is used to ensure that all of a
// node's uses are scheduled before the node itself.
PrepareUsesVisitor prepare_uses(this);
- graph_->VisitNodeInputsFromEnd(&prepare_uses);
+
+ // TODO(turbofan): simplify the careful pre/post ordering here.
+ BoolVector visited(graph_->NodeCount(), false, zone_);
+ ZoneStack<Node::InputEdges::iterator> stack(zone_);
+ Node* node = graph_->end();
+ prepare_uses.Pre(node);
+ visited[node->id()] = true;
+ stack.push(node->input_edges().begin());
+ while (!stack.empty()) {
+ Edge edge = *stack.top();
+ Node* node = edge.to();
+ if (visited[node->id()]) {
+ prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
+ if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
+ } else {
+ prepare_uses.Pre(node);
+ visited[node->id()] = true;
+ if (node->InputCount() > 0) stack.push(node->input_edges().begin());
+ }
+ }
}
@@ -1130,8 +1148,8 @@ class ScheduleEarlyNodeVisitor {
// Run the schedule early algorithm on a set of fixed root nodes.
void Run(NodeVector* roots) {
- for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
- queue_.push(*i);
+ for (Node* const root : *roots) {
+ queue_.push(root);
while (!queue_.empty()) {
VisitNode(queue_.front());
queue_.pop();
@@ -1159,9 +1177,8 @@ class ScheduleEarlyNodeVisitor {
// Propagate schedule early position.
DCHECK(data->minimum_block_ != NULL);
- Node::Uses uses = node->uses();
- for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
- PropagateMinimumPositionToNode(data->minimum_block_, *i);
+ for (auto use : node->uses()) {
+ PropagateMinimumPositionToNode(data->minimum_block_, use);
}
}
@@ -1196,7 +1213,7 @@ class ScheduleEarlyNodeVisitor {
#if DEBUG
bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
- BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2);
+ BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
return dominator == b1 || dominator == b2;
}
#endif
@@ -1231,12 +1248,15 @@ void Scheduler::ScheduleEarly() {
class ScheduleLateNodeVisitor {
public:
ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
- : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+ : scheduler_(scheduler),
+ schedule_(scheduler_->schedule_),
+ marked_(scheduler->zone_),
+ marking_queue_(scheduler->zone_) {}
// Run the schedule late algorithm on a set of fixed root nodes.
void Run(NodeVector* roots) {
- for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
- ProcessQueue(*i);
+ for (Node* const root : *roots) {
+ ProcessQueue(root);
}
}
@@ -1253,10 +1273,11 @@ class ScheduleLateNodeVisitor {
if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
queue->push(node);
- while (!queue->empty()) {
- VisitNode(queue->front());
+ do {
+ Node* const node = queue->front();
queue->pop();
- }
+ VisitNode(node);
+ } while (!queue->empty());
}
}
@@ -1278,7 +1299,7 @@ class ScheduleLateNodeVisitor {
// The schedule early block dominates the schedule late block.
BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
- DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block));
+ DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
node->id(), node->op()->mnemonic(), block->id().ToInt(),
block->loop_depth(), min_block->id().ToInt());
@@ -1287,13 +1308,19 @@ class ScheduleLateNodeVisitor {
// into enclosing loop pre-headers until they would preceed their schedule
// early position.
BasicBlock* hoist_block = GetPreHeader(block);
- while (hoist_block != NULL &&
- hoist_block->dominator_depth() >= min_block->dominator_depth()) {
- Trace(" hoisting #%d:%s to block B%d\n", node->id(),
- node->op()->mnemonic(), hoist_block->id().ToInt());
- DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
- block = hoist_block;
- hoist_block = GetPreHeader(hoist_block);
+ if (hoist_block &&
+ hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+ do {
+ Trace(" hoisting #%d:%s to block B%d\n", node->id(),
+ node->op()->mnemonic(), hoist_block->id().ToInt());
+ DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
+ block = hoist_block;
+ hoist_block = GetPreHeader(hoist_block);
+ } while (hoist_block &&
+ hoist_block->dominator_depth() >= min_block->dominator_depth());
+ } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
+ // Split the {node} if beneficial and return the new {block} for it.
+ block = SplitNode(block, node);
}
// Schedule the node or a floating control structure.
@@ -1304,6 +1331,101 @@ class ScheduleLateNodeVisitor {
}
}
+ // Mark {block} and push its non-marked predecessor on the marking queue.
+ void MarkBlock(BasicBlock* block) {
+ DCHECK_LT(block->id().ToSize(), marked_.size());
+ marked_[block->id().ToSize()] = true;
+ for (BasicBlock* pred_block : block->predecessors()) {
+ DCHECK_LT(pred_block->id().ToSize(), marked_.size());
+ if (marked_[pred_block->id().ToSize()]) continue;
+ marking_queue_.push_back(pred_block);
+ }
+ }
+
+ BasicBlock* SplitNode(BasicBlock* block, Node* node) {
+ // For now, we limit splitting to pure nodes.
+ if (!node->op()->HasProperty(Operator::kPure)) return block;
+
+ // The {block} is common dominator of all uses of {node}, so we cannot
+ // split anything unless the {block} has at least two successors.
+ DCHECK_EQ(block, GetCommonDominatorOfUses(node));
+ if (block->SuccessorCount() < 2) return block;
+
+ // Clear marking bits.
+ DCHECK(marking_queue_.empty());
+ std::fill(marked_.begin(), marked_.end(), false);
+ marked_.resize(schedule_->BasicBlockCount() + 1, false);
+
+ // Check if the {node} has uses in {block}.
+ for (Edge edge : node->use_edges()) {
+ BasicBlock* use_block = GetBlockForUse(edge);
+ if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
+ if (use_block == block) {
+ Trace(" not splitting #%d:%s, it is used in B%d\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt());
+ marking_queue_.clear();
+ return block;
+ }
+ MarkBlock(use_block);
+ }
+
+ // Compute transitive marking closure; a block is marked if all its
+ // successors are marked.
+ do {
+ BasicBlock* top_block = marking_queue_.front();
+ marking_queue_.pop_front();
+ if (marked_[top_block->id().ToSize()]) continue;
+ bool marked = true;
+ for (BasicBlock* successor : top_block->successors()) {
+ if (!marked_[successor->id().ToSize()]) {
+ marked = false;
+ break;
+ }
+ }
+ if (marked) MarkBlock(top_block);
+ } while (!marking_queue_.empty());
+
+ // If the (common dominator) {block} is marked, we know that all paths from
+ // {block} to the end contain at least one use of {node}, and hence there's
+ // no point in splitting the {node} in this case.
+ if (marked_[block->id().ToSize()]) {
+ Trace(" not splitting #%d:%s, its common dominator B%d is perfect\n",
+ node->id(), node->op()->mnemonic(), block->id().ToInt());
+ return block;
+ }
+
+ // Split {node} for uses according to the previously computed marking
+ // closure. Every marking partition has a unique dominator, which get's a
+ // copy of the {node} with the exception of the first partition, which get's
+ // the {node} itself.
+ ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
+ for (Edge edge : node->use_edges()) {
+ BasicBlock* use_block = GetBlockForUse(edge);
+ if (use_block == nullptr) continue;
+ while (marked_[use_block->dominator()->id().ToSize()]) {
+ use_block = use_block->dominator();
+ }
+ auto& use_node = dominators[use_block];
+ if (use_node == nullptr) {
+ if (dominators.size() == 1u) {
+ // Place the {node} at {use_block}.
+ block = use_block;
+ use_node = node;
+ Trace(" pushing #%d:%s down to B%d\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt());
+ } else {
+ // Place a copy of {node} at {use_block}.
+ use_node = CloneNode(node);
+ Trace(" cloning #%d:%s for B%d\n", use_node->id(),
+ use_node->op()->mnemonic(), use_block->id().ToInt());
+ scheduler_->schedule_queue_.push(use_node);
+ }
+ }
+ edge.UpdateTo(use_node);
+ }
+ return block;
+ }
+
BasicBlock* GetPreHeader(BasicBlock* block) {
if (block->IsLoopHeader()) {
return block->dominator();
@@ -1315,12 +1437,12 @@ class ScheduleLateNodeVisitor {
}
BasicBlock* GetCommonDominatorOfUses(Node* node) {
- BasicBlock* block = NULL;
+ BasicBlock* block = nullptr;
for (Edge edge : node->use_edges()) {
BasicBlock* use_block = GetBlockForUse(edge);
block = block == NULL ? use_block : use_block == NULL
? block
- : scheduler_->GetCommonDominator(
+ : BasicBlock::GetCommonDominator(
block, use_block);
}
return block;
@@ -1329,7 +1451,7 @@ class ScheduleLateNodeVisitor {
BasicBlock* GetBlockForUse(Edge edge) {
Node* use = edge.from();
IrOpcode::Value opcode = use->opcode();
- if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ if (IrOpcode::IsPhiOpcode(opcode)) {
// If the use is from a coupled (i.e. floating) phi, compute the common
// dominator of its uses. This will not recurse more than one level.
if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
@@ -1366,8 +1488,25 @@ class ScheduleLateNodeVisitor {
scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
}
+ Node* CloneNode(Node* node) {
+ int const input_count = node->InputCount();
+ Node** const inputs = scheduler_->zone_->NewArray<Node*>(input_count);
+ for (int index = 0; index < input_count; ++index) {
+ Node* const input = node->InputAt(index);
+ scheduler_->IncrementUnscheduledUseCount(input, index, node);
+ inputs[index] = input;
+ }
+ Node* copy = scheduler_->graph_->NewNode(node->op(), input_count, inputs);
+ scheduler_->node_data_.resize(copy->id() + 1,
+ scheduler_->DefaultSchedulerData());
+ scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
+ return copy;
+ }
+
Scheduler* scheduler_;
Schedule* schedule_;
+ BoolVector marked_;
+ ZoneDeque<BasicBlock*> marking_queue_;
};
@@ -1403,7 +1542,7 @@ void Scheduler::SealFinalSchedule() {
for (NodeVector& nodes : scheduled_nodes_) {
BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
BasicBlock* block = schedule_->GetBlockById(id);
- for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) {
+ for (auto i = nodes.rbegin(); i != nodes.rend(); ++i) {
schedule_->AddNode(block, *i);
}
}
@@ -1439,10 +1578,7 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
NodeVector propagation_roots(control_flow_builder_->control_);
for (Node* node : control_flow_builder_->control_) {
for (Node* use : node->uses()) {
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
- propagation_roots.push_back(use);
- }
+ if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
}
}
if (FLAG_trace_turbo_scheduler) {
@@ -1471,9 +1607,9 @@ void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
to->id().ToInt());
NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
- for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) {
- schedule_->SetBlockForNode(to, *i);
- scheduled_nodes_[to->id().ToSize()].push_back(*i);
+ for (Node* const node : *nodes) {
+ schedule_->SetBlockForNode(to, node);
+ scheduled_nodes_[to->id().ToSize()].push_back(node);
}
nodes->clear();
}
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index 9da0b6daa4..269c271ae5 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_SCHEDULER_H_
#define V8_COMPILER_SCHEDULER_H_
-#include "src/v8.h"
-
+#include "src/base/flags.h"
+#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
#include "src/compiler/zone-pool.h"
@@ -16,17 +16,24 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
class CFGBuilder;
class ControlEquivalence;
+class Graph;
class SpecialRPONumberer;
+
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
class Scheduler {
public:
+ // Flags that control the mode of operation.
+ enum Flag { kNoFlags = 0u, kSplitNodes = 1u << 1 };
+ typedef base::Flags<Flag> Flags;
+
// The complete scheduling algorithm. Creates a new schedule and places all
// nodes from the graph into it.
- static Schedule* ComputeSchedule(Zone* zone, Graph* graph);
+ static Schedule* ComputeSchedule(Zone* zone, Graph* graph, Flags flags);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
@@ -56,6 +63,7 @@ class Scheduler {
Zone* zone_;
Graph* graph_;
Schedule* schedule_;
+ Flags flags_;
NodeVectorVector scheduled_nodes_; // Per-block list of nodes in reverse.
NodeVector schedule_root_nodes_; // Fixed root nodes seed the worklist.
ZoneQueue<Node*> schedule_queue_; // Worklist of schedulable nodes.
@@ -64,7 +72,7 @@ class Scheduler {
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
- Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
+ Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags);
inline SchedulerData DefaultSchedulerData();
inline SchedulerData* GetData(Node* node);
@@ -76,7 +84,6 @@ class Scheduler {
void IncrementUnscheduledUseCount(Node* node, int index, Node* from);
void DecrementUnscheduledUseCount(Node* node, int index, Node* from);
- BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
void PropagateImmediateDominators(BasicBlock* block);
// Phase 1: Build control-flow graph.
@@ -107,6 +114,9 @@ class Scheduler {
void MovePlannedNodes(BasicBlock* from, BasicBlock* to);
};
+
+DEFINE_OPERATORS_FOR_FLAGS(Scheduler::Flags)
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index edecf58fb8..2e0f0d1a34 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -62,7 +62,7 @@ Reduction SelectLowering::Reduce(Node* node) {
bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
// TODO(turbofan): This is probably horribly expensive, and it should be moved
// into node.h or somewhere else?!
- Zone zone(graph()->zone()->isolate());
+ Zone zone;
std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
BoolVector visited(graph()->NodeCount(), false, &zone);
queue.push(source);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 1461709dab..59dd741ee4 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -10,13 +10,14 @@
#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/source-position.h"
#include "src/objects.h"
namespace v8 {
@@ -65,7 +66,8 @@ class RepresentationSelector {
};
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
- RepresentationChanger* changer)
+ RepresentationChanger* changer,
+ SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
count_(jsgraph->graph()->NodeCount()),
info_(zone->NewArray<NodeInfo>(count_)),
@@ -73,16 +75,12 @@ class RepresentationSelector {
replacements_(zone),
phase_(PROPAGATE),
changer_(changer),
- queue_(zone) {
+ queue_(zone),
+ source_positions_(source_positions) {
memset(info_, 0, sizeof(NodeInfo) * count_);
- Factory* f = zone->isolate()->factory();
- safe_bit_range_ =
- Type::Union(Type::Boolean(),
- Type::Range(f->NewNumber(0), f->NewNumber(1), zone), zone);
safe_int_additive_range_ =
- Type::Range(f->NewNumber(-std::pow(2.0, 52.0)),
- f->NewNumber(std::pow(2.0, 52.0)), zone);
+ Type::Range(-std::pow(2.0, 52.0), std::pow(2.0, 52.0), zone);
}
void Run(SimplifiedLowering* lowering) {
@@ -111,7 +109,13 @@ class RepresentationSelector {
Node* node = *i;
TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
// Reuse {VisitNode()} so the representation rules are in one place.
- VisitNode(node, GetUseInfo(node), lowering);
+ if (FLAG_turbo_source_positions) {
+ SourcePositionTable::Scope scope(
+ source_positions_, source_positions_->GetSourcePosition(node));
+ VisitNode(node, GetUseInfo(node), lowering);
+ } else {
+ VisitNode(node, GetUseInfo(node), lowering);
+ }
}
// Perform the final replacements.
@@ -250,12 +254,17 @@ class RepresentationSelector {
++i, j--) {
ProcessInput(node, (*i).index(), kMachAnyTagged); // Context inputs
}
+ for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
+ ++i, j--) {
+ Enqueue((*i).to()); // FrameState inputs: just visit
+ }
for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
Enqueue((*i).to()); // Effect inputs: just visit
}
for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
Enqueue((*i).to()); // Control inputs: just visit
}
+ DCHECK(i == node->input_edges().end());
SetOutput(node, kMachAnyTagged);
}
@@ -323,7 +332,7 @@ class RepresentationSelector {
} else {
return kRepFloat64;
}
- } else if (IsSafeBitOperand(node)) {
+ } else if (upper->Is(Type::Boolean())) {
// multiple uses => pick kRepBit.
return kRepBit;
} else if (upper->Is(Type::Number())) {
@@ -417,11 +426,6 @@ class RepresentationSelector {
return BothInputsAre(node, Type::Signed32()) && !CanObserveNonInt32(use);
}
- bool IsSafeBitOperand(Node* node) {
- Type* type = NodeProperties::GetBounds(node).upper;
- return type->Is(safe_bit_range_);
- }
-
bool IsSafeIntAdditiveOperand(Node* node) {
Type* type = NodeProperties::GetBounds(node).upper;
// TODO(jarin): Unfortunately, bitset types are not subtypes of larger
@@ -482,6 +486,8 @@ class RepresentationSelector {
SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
return;
}
+ case IrOpcode::kAlways:
+ return VisitLeaf(node, kRepBit);
case IrOpcode::kInt32Constant:
return VisitLeaf(node, kRepWord32);
case IrOpcode::kInt64Constant:
@@ -495,18 +501,14 @@ class RepresentationSelector {
case IrOpcode::kHeapConstant:
return VisitLeaf(node, kRepTagged);
- case IrOpcode::kEnd:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kReturn:
- case IrOpcode::kMerge:
- case IrOpcode::kThrow:
- return VisitInputs(node); // default visit for all node inputs.
-
case IrOpcode::kBranch:
ProcessInput(node, 0, kRepBit);
Enqueue(NodeProperties::GetControlInput(node, 0));
break;
+ case IrOpcode::kSwitch:
+ ProcessInput(node, 0, kRepWord32);
+ Enqueue(NodeProperties::GetControlInput(node, 0));
+ break;
case IrOpcode::kSelect:
return VisitSelect(node, use, lowering);
case IrOpcode::kPhi:
@@ -530,24 +532,20 @@ class RepresentationSelector {
// Simplified operators.
//------------------------------------------------------------------
case IrOpcode::kAnyToBoolean: {
- if (IsSafeBitOperand(node->InputAt(0))) {
- VisitUnop(node, kRepBit, kRepBit);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, kMachAnyTagged, kTypeBool | kRepTagged);
- if (lower()) {
- // AnyToBoolean(x) => Call(ToBooleanStub, x, no-context)
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::ToBoolean(
- jsgraph_->isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
- CallDescriptor::Flags flags = CallDescriptor::kPatchableCallSite;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- callable.descriptor(), 0, flags, properties, jsgraph_->zone());
- node->set_op(jsgraph_->common()->Call(desc));
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- }
+ VisitUnop(node, kMachAnyTagged, kTypeBool | kRepTagged);
+ if (lower()) {
+ // AnyToBoolean(x) => Call(ToBooleanStub, x, no-context)
+ Operator::Properties properties = node->op()->properties();
+ Callable callable = CodeFactory::ToBoolean(
+ jsgraph_->isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+ CallDescriptor::Flags flags = CallDescriptor::kPatchableCallSite;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+ flags, properties);
+ node->set_op(jsgraph_->common()->Call(desc));
+ node->InsertInput(jsgraph_->zone(), 0,
+ jsgraph_->HeapConstant(callable.code()));
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
}
break;
}
@@ -726,15 +724,15 @@ class RepresentationSelector {
// If the input has type uint32, pass through representation.
VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeUint32 ||
- in_upper->Is(Type::Unsigned32())) {
+ } else if ((in & kTypeMask) == kTypeInt32 ||
+ in_upper->Is(Type::Signed32())) {
// Just change representation if necessary.
- VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
+ VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeInt32 ||
+ } else if ((in & kTypeMask) == kTypeUint32 ||
(in & kRepMask) == kRepWord32) {
// Just change representation if necessary.
- VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
+ VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
// Require the input in float64 format and perform truncation.
@@ -745,6 +743,23 @@ class RepresentationSelector {
}
break;
}
+ case IrOpcode::kPlainPrimitiveToNumber: {
+ VisitUnop(node, kMachAnyTagged, kTypeNumber | kRepTagged);
+ if (lower()) {
+ // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
+ Operator::Properties properties = node->op()->properties();
+ Callable callable = CodeFactory::ToNumber(jsgraph_->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+ flags, properties);
+ node->set_op(jsgraph_->common()->Call(desc));
+ node->InsertInput(jsgraph_->zone(), 0,
+ jsgraph_->HeapConstant(callable.code()));
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ }
+ break;
+ }
case IrOpcode::kReferenceEqual: {
VisitBinop(node, kMachAnyTagged, kRepBit);
if (lower()) node->set_op(lowering->machine()->WordEqual());
@@ -849,10 +864,10 @@ class RepresentationSelector {
if (lower()) {
Node* is_tagged = jsgraph_->graph()->NewNode(
jsgraph_->machine()->WordAnd(), node->InputAt(0),
- jsgraph_->Int32Constant(static_cast<int>(kSmiTagMask)));
+ jsgraph_->IntPtrConstant(kSmiTagMask));
Node* is_smi = jsgraph_->graph()->NewNode(
jsgraph_->machine()->WordEqual(), is_tagged,
- jsgraph_->Int32Constant(kSmiTag));
+ jsgraph_->IntPtrConstant(kSmiTag));
DeferReplacement(node, is_smi);
}
break;
@@ -863,13 +878,13 @@ class RepresentationSelector {
if (lower()) {
Node* is_tagged = jsgraph_->graph()->NewNode(
jsgraph_->machine()->WordAnd(), node->InputAt(0),
- jsgraph_->Int32Constant(static_cast<int>(kSmiTagMask)));
+ jsgraph_->IntPtrConstant(kSmiTagMask));
Node* is_smi = jsgraph_->graph()->NewNode(
jsgraph_->machine()->WordEqual(), is_tagged,
- jsgraph_->Int32Constant(kSmiTag));
+ jsgraph_->IntPtrConstant(kSmiTag));
Node* is_non_neg = jsgraph_->graph()->NewNode(
jsgraph_->machine()->IntLessThanOrEqual(),
- jsgraph_->Int32Constant(0), node->InputAt(0));
+ jsgraph_->IntPtrConstant(0), node->InputAt(0));
Node* is_non_neg_smi = jsgraph_->graph()->NewNode(
jsgraph_->machine()->Word32And(), is_smi, is_non_neg);
DeferReplacement(node, is_non_neg_smi);
@@ -1028,14 +1043,16 @@ class RepresentationSelector {
node->op()->mnemonic(), replacement->id(),
replacement->op()->mnemonic()));
}
- if (replacement->id() < count_) {
- // Replace with a previously existing node eagerly.
+ if (replacement->id() < count_ &&
+ GetInfo(replacement)->output == GetInfo(node)->output) {
+ // Replace with a previously existing node eagerly only if the type is the
+ // same.
node->ReplaceUses(replacement);
} else {
// Otherwise, we are replacing a node with a representation change.
// Such a substitution must be done after all lowering is done, because
- // new nodes do not have {NodeInfo} entries, and that would confuse
- // the representation change insertion for uses of it.
+ // changing the type could confuse the representation change
+ // insertion for uses of the node.
replacements_.push_back(node);
replacements_.push_back(replacement);
}
@@ -1064,7 +1081,12 @@ class RepresentationSelector {
Phase phase_; // current phase of algorithm
RepresentationChanger* changer_; // for inserting representation changes
ZoneQueue<Node*> queue_; // queue for traversing the graph
- Type* safe_bit_range_;
+ // TODO(danno): RepresentationSelector shouldn't know anything about the
+ // source positions table, but must for now since there currently is no other
+ // way to pass down source position information to nodes created during
+ // lowering. Once this phase becomes a vanilla reducer, it should get source
+ // position information via the SourcePositionWrapper like all other reducers.
+ SourcePositionTable* source_positions_;
Type* safe_int_additive_range_;
NodeInfo* GetInfo(Node* node) {
@@ -1087,9 +1109,9 @@ Node* SimplifiedLowering::IsTagged(Node* node) {
void SimplifiedLowering::LowerAllNodes() {
SimplifiedOperatorBuilder simplified(graph()->zone());
- RepresentationChanger changer(jsgraph(), &simplified,
- graph()->zone()->isolate());
- RepresentationSelector selector(jsgraph(), zone_, &changer);
+ RepresentationChanger changer(jsgraph(), &simplified, jsgraph()->isolate());
+ RepresentationSelector selector(jsgraph(), zone_, &changer,
+ source_positions_);
selector.Run(this);
}
@@ -1253,10 +1275,11 @@ void SimplifiedLowering::DoStoreElement(Node* node) {
void SimplifiedLowering::DoStringAdd(Node* node) {
Operator::Properties properties = node->op()->properties();
Callable callable = CodeFactory::StringAdd(
- zone()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ jsgraph()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- callable.descriptor(), 0, flags, properties, zone());
+ jsgraph()->isolate(), zone(), callable.descriptor(), 0, flags,
+ properties);
node->set_op(common()->Call(desc));
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
@@ -1267,14 +1290,14 @@ void SimplifiedLowering::DoStringAdd(Node* node) {
Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
- CEntryStub stub(zone()->isolate(), 1);
+ CEntryStub stub(jsgraph()->isolate(), 1);
Runtime::FunctionId f =
requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
- ExternalReference ref(f, zone()->isolate());
+ ExternalReference ref(f, jsgraph()->isolate());
Operator::Properties props = node->op()->properties();
// TODO(mstarzinger): We should call StringCompareStub here instead, once an
// interface descriptor is available for it.
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(f, 2, props, zone());
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
return graph()->NewNode(common()->Call(desc),
jsgraph()->HeapConstant(stub.GetCode()),
NodeProperties::GetValueInput(node, 0),
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index b21cf21ffd..11506e9ac0 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -16,12 +16,13 @@ namespace compiler {
// Forward declarations.
class RepresentationChanger;
-
+class SourcePositionTable;
class SimplifiedLowering FINAL {
public:
- SimplifiedLowering(JSGraph* jsgraph, Zone* zone)
- : jsgraph_(jsgraph), zone_(zone) {}
+ SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
+ SourcePositionTable* source_positions)
+ : jsgraph_(jsgraph), zone_(zone), source_positions_(source_positions) {}
~SimplifiedLowering() {}
void LowerAllNodes();
@@ -45,6 +46,13 @@ class SimplifiedLowering FINAL {
JSGraph* const jsgraph_;
Zone* const zone_;
+ // TODO(danno): SimplifiedLowering shouldn't know anything about the source
+ // positions table, but must for now since there currently is no other way to
+ // pass down source position information to nodes created during
+ // lowering. Once this phase becomes a vanilla reducer, it should get source
+ // position information via the SourcePositionWrapper like all other reducers.
+ SourcePositionTable* source_positions_;
+
Node* SmiTag(Node* node);
Node* IsTagged(Node* node);
Node* Untag(Node* node);
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 9d45e5b192..ad48379f4c 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -8,7 +8,8 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 1e565b8b13..32a7bcc560 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -12,6 +12,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class Factory;
class Heap;
namespace compiler {
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9d88d12301..e1e8c30aad 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -157,33 +157,34 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
}
-#define PURE_OP_LIST(V) \
- V(AnyToBoolean, Operator::kNoProperties, 1) \
- V(BooleanNot, Operator::kNoProperties, 1) \
- V(BooleanToNumber, Operator::kNoProperties, 1) \
- V(NumberEqual, Operator::kCommutative, 2) \
- V(NumberLessThan, Operator::kNoProperties, 2) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
- V(NumberAdd, Operator::kCommutative, 2) \
- V(NumberSubtract, Operator::kNoProperties, 2) \
- V(NumberMultiply, Operator::kCommutative, 2) \
- V(NumberDivide, Operator::kNoProperties, 2) \
- V(NumberModulus, Operator::kNoProperties, 2) \
- V(NumberToInt32, Operator::kNoProperties, 1) \
- V(NumberToUint32, Operator::kNoProperties, 1) \
- V(StringEqual, Operator::kCommutative, 2) \
- V(StringLessThan, Operator::kNoProperties, 2) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
- V(StringAdd, Operator::kNoProperties, 2) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
- V(ChangeBoolToBit, Operator::kNoProperties, 1) \
- V(ChangeBitToBool, Operator::kNoProperties, 1) \
- V(ObjectIsSmi, Operator::kNoProperties, 1) \
+#define PURE_OP_LIST(V) \
+ V(AnyToBoolean, Operator::kNoProperties, 1) \
+ V(BooleanNot, Operator::kNoProperties, 1) \
+ V(BooleanToNumber, Operator::kNoProperties, 1) \
+ V(NumberEqual, Operator::kCommutative, 2) \
+ V(NumberLessThan, Operator::kNoProperties, 2) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(NumberAdd, Operator::kCommutative, 2) \
+ V(NumberSubtract, Operator::kNoProperties, 2) \
+ V(NumberMultiply, Operator::kCommutative, 2) \
+ V(NumberDivide, Operator::kNoProperties, 2) \
+ V(NumberModulus, Operator::kNoProperties, 2) \
+ V(NumberToInt32, Operator::kNoProperties, 1) \
+ V(NumberToUint32, Operator::kNoProperties, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
+ V(StringEqual, Operator::kCommutative, 2) \
+ V(StringLessThan, Operator::kNoProperties, 2) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(StringAdd, Operator::kNoProperties, 2) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeBoolToBit, Operator::kNoProperties, 1) \
+ V(ChangeBitToBool, Operator::kNoProperties, 1) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1) \
V(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 22664fa8f7..5eed7c3291 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -144,6 +144,8 @@ class SimplifiedOperatorBuilder FINAL {
const Operator* NumberToInt32();
const Operator* NumberToUint32();
+ const Operator* PlainPrimitiveToNumber();
+
const Operator* ReferenceEqual(Type* type);
const Operator* StringEqual();
diff --git a/deps/v8/src/compiler/source-position.cc b/deps/v8/src/compiler/source-position.cc
index 9e21ae415a..2ef18548dd 100644
--- a/deps/v8/src/compiler/source-position.cc
+++ b/deps/v8/src/compiler/source-position.cc
@@ -4,7 +4,7 @@
#include "src/compiler/source-position.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-aux-data.h"
namespace v8 {
namespace internal {
@@ -15,7 +15,7 @@ class SourcePositionTable::Decorator FINAL : public GraphDecorator {
explicit Decorator(SourcePositionTable* source_positions)
: source_positions_(source_positions) {}
- void Decorate(Node* node) FINAL {
+ void Decorate(Node* node, bool incomplete) FINAL {
DCHECK(!source_positions_->current_position_.IsInvalid());
source_positions_->table_.Set(node, source_positions_->current_position_);
}
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 137829e92d..9af65597bf 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/flags.h"
#include "src/bootstrapper.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/typer.h"
@@ -45,7 +44,8 @@ enum LazyCachedType {
// TODO(turbofan): these types could be globally cached or cached per isolate.
class LazyTypeCache FINAL : public ZoneObject {
public:
- explicit LazyTypeCache(Zone* zone) : zone_(zone) {
+ explicit LazyTypeCache(Isolate* isolate, Zone* zone)
+ : isolate_(isolate), zone_(zone) {
memset(cache_, 0, sizeof(cache_));
}
@@ -128,15 +128,15 @@ class LazyTypeCache FINAL : public ZoneObject {
}
Type* CreateRange(double min, double max) const {
- return Type::Range(factory()->NewNumber(min), factory()->NewNumber(max),
- zone());
+ return Type::Range(min, max, zone());
}
Factory* factory() const { return isolate()->factory(); }
- Isolate* isolate() const { return zone()->isolate(); }
+ Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
Type* cache_[kNumLazyCachedTypes];
+ Isolate* isolate_;
Zone* zone_;
};
@@ -144,25 +144,24 @@ class LazyTypeCache FINAL : public ZoneObject {
class Typer::Decorator FINAL : public GraphDecorator {
public:
explicit Decorator(Typer* typer) : typer_(typer) {}
- void Decorate(Node* node) FINAL;
+ void Decorate(Node* node, bool incomplete) FINAL;
private:
Typer* typer_;
};
-Typer::Typer(Graph* graph, MaybeHandle<Context> context)
- : graph_(graph),
+Typer::Typer(Isolate* isolate, Graph* graph, MaybeHandle<Context> context)
+ : isolate_(isolate),
+ graph_(graph),
context_(context),
decorator_(NULL),
- cache_(new (graph->zone()) LazyTypeCache(graph->zone())),
+ cache_(new (graph->zone()) LazyTypeCache(isolate, graph->zone())),
weaken_min_limits_(graph->zone()),
weaken_max_limits_(graph->zone()) {
Zone* zone = this->zone();
- Factory* f = zone->isolate()->factory();
+ Factory* f = isolate->factory();
- Handle<Object> zero = f->NewNumber(0);
- Handle<Object> one = f->NewNumber(1);
Handle<Object> infinity = f->NewNumber(+V8_INFINITY);
Handle<Object> minusinfinity = f->NewNumber(-V8_INFINITY);
@@ -180,8 +179,8 @@ Typer::Typer(Graph* graph, MaybeHandle<Context> context)
undefined_or_number = Type::Union(Type::Undefined(), Type::Number(), zone);
singleton_false = Type::Constant(f->false_value(), zone);
singleton_true = Type::Constant(f->true_value(), zone);
- singleton_zero = Type::Range(zero, zero, zone);
- singleton_one = Type::Range(one, one, zone);
+ singleton_zero = Type::Range(0.0, 0.0, zone);
+ singleton_one = Type::Range(1.0, 1.0, zone);
zero_or_one = Type::Union(singleton_zero, singleton_one, zone);
zeroish = Type::Union(singleton_zero, nan_or_minuszero, zone);
signed32ish = Type::Union(signed32, truncating_to_zero, zone);
@@ -193,7 +192,7 @@ Typer::Typer(Graph* graph, MaybeHandle<Context> context)
truish = Type::Union(
singleton_true,
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone), zone);
- integer = Type::Range(minusinfinity, infinity, zone);
+ integer = Type::Range(-V8_INFINITY, V8_INFINITY, zone);
weakint = Type::Union(integer, nan_or_minuszero, zone);
number_fun0_ = Type::Function(number, zone);
@@ -209,11 +208,11 @@ Typer::Typer(Graph* graph, MaybeHandle<Context> context)
weaken_max_limits_.reserve(limits_count + 1);
double limit = 1 << 30;
- weaken_min_limits_.push_back(f->NewNumber(0));
- weaken_max_limits_.push_back(f->NewNumber(0));
+ weaken_min_limits_.push_back(0);
+ weaken_max_limits_.push_back(0);
for (int i = 0; i < limits_count; i++) {
- weaken_min_limits_.push_back(f->NewNumber(-limit));
- weaken_max_limits_.push_back(f->NewNumber(limit - 1));
+ weaken_min_limits_.push_back(-limit);
+ weaken_max_limits_.push_back(limit - 1);
limit *= 2;
}
@@ -313,14 +312,7 @@ class Typer::Visitor : public Reducer {
return BoundsOrNone(operand_node);
}
- Bounds ContextOperand(Node* node) {
- Bounds result = BoundsOrNone(NodeProperties::GetContextInput(node));
- DCHECK(result.upper->Maybe(Type::Internal()));
- // TODO(rossberg): More precisely, instead of the above assertion, we should
- // back-propagate the constraint that it has to be a subtype of Internal.
- return result;
- }
-
+ Bounds WrapContextBoundsForInput(Node* node);
Type* Weaken(Type* current_type, Type* previous_type);
Zone* zone() { return typer_->zone(); }
@@ -334,8 +326,16 @@ class Typer::Visitor : public Reducer {
Bounds TypeUnaryOp(Node* node, UnaryTyperFun);
Bounds TypeBinaryOp(Node* node, BinaryTyperFun);
+ enum ComparisonOutcomeFlags {
+ kComparisonTrue = 1,
+ kComparisonFalse = 2,
+ kComparisonUndefined = 4
+ };
+ typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
+
+ static ComparisonOutcome Invert(ComparisonOutcome, Typer*);
static Type* Invert(Type*, Typer*);
- static Type* FalsifyUndefined(Type*, Typer*);
+ static Type* FalsifyUndefined(ComparisonOutcome, Typer*);
static Type* Rangify(Type*, Typer*);
static Type* ToPrimitive(Type*, Typer*);
@@ -351,7 +351,7 @@ class Typer::Visitor : public Reducer {
static Type* JSDivideRanger(Type::RangeType*, Type::RangeType*, Typer*);
static Type* JSModulusRanger(Type::RangeType*, Type::RangeType*, Typer*);
- static Type* JSCompareTyper(Type*, Type*, Typer*);
+ static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
#define DECLARE_METHOD(x) static Type* x##Typer(Type*, Type*, Typer*);
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
@@ -417,7 +417,8 @@ void Typer::Run() {
}
-void Typer::Decorator::Decorate(Node* node) {
+void Typer::Decorator::Decorate(Node* node, bool incomplete) {
+ if (incomplete) return;
if (node->op()->ValueOutputCount() > 0) {
// Only eagerly type-decorate nodes with known input types.
// Other cases will generally require a proper fixpoint iteration with Run.
@@ -444,14 +445,14 @@ void Typer::Decorator::Decorate(Node* node) {
Bounds Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
Bounds input = Operand(node, 0);
- Type* upper = input.upper->Is(Type::None())
- ? Type::None()
- : f(input.upper, typer_);
- Type* lower = input.lower->Is(Type::None())
- ? Type::None()
- : (input.lower == input.upper || upper->IsConstant())
- ? upper // TODO(neis): Extend this to Range(x,x), NaN, MinusZero, ...?
- : f(input.lower, typer_);
+ Type* upper =
+ input.upper->IsInhabited() ? f(input.upper, typer_) : Type::None();
+ Type* lower = input.lower->IsInhabited()
+ ? ((input.lower == input.upper || upper->IsConstant())
+ ? upper // TODO(neis): Extend this to Range(x,x),
+ // NaN, MinusZero, ...?
+ : f(input.lower, typer_))
+ : Type::None();
// TODO(neis): Figure out what to do with lower bound.
return Bounds(lower, upper);
}
@@ -460,30 +461,49 @@ Bounds Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
Bounds Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
Bounds left = Operand(node, 0);
Bounds right = Operand(node, 1);
- Type* upper = left.upper->Is(Type::None()) || right.upper->Is(Type::None())
- ? Type::None()
- : f(left.upper, right.upper, typer_);
- Type* lower = left.lower->Is(Type::None()) || right.lower->Is(Type::None())
- ? Type::None()
- : ((left.lower == left.upper && right.lower == right.upper) ||
- upper->IsConstant())
- ? upper
- : f(left.lower, right.lower, typer_);
+ Type* upper = left.upper->IsInhabited() && right.upper->IsInhabited()
+ ? f(left.upper, right.upper, typer_)
+ : Type::None();
+ Type* lower =
+ left.lower->IsInhabited() && right.lower->IsInhabited()
+ ? (((left.lower == left.upper && right.lower == right.upper) ||
+ upper->IsConstant())
+ ? upper
+ : f(left.lower, right.lower, typer_))
+ : Type::None();
// TODO(neis): Figure out what to do with lower bound.
return Bounds(lower, upper);
}
Type* Typer::Visitor::Invert(Type* type, Typer* t) {
+ DCHECK(type->Is(Type::Boolean()));
+ DCHECK(type->IsInhabited());
if (type->Is(t->singleton_false)) return t->singleton_true;
if (type->Is(t->singleton_true)) return t->singleton_false;
return type;
}
-Type* Typer::Visitor::FalsifyUndefined(Type* type, Typer* t) {
- if (type->Is(Type::Undefined())) return t->singleton_false;
- return type;
+Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
+ ComparisonOutcome outcome, Typer* t) {
+ ComparisonOutcome result(0);
+ if ((outcome & kComparisonUndefined) != 0) result |= kComparisonUndefined;
+ if ((outcome & kComparisonTrue) != 0) result |= kComparisonFalse;
+ if ((outcome & kComparisonFalse) != 0) result |= kComparisonTrue;
+ return result;
+}
+
+
+Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
+ if ((outcome & kComparisonFalse) != 0 ||
+ (outcome & kComparisonUndefined) != 0) {
+ return (outcome & kComparisonTrue) != 0 ? Type::Boolean()
+ : t->singleton_false;
+ }
+ // Type should be non empty, so we know it should be true.
+ DCHECK((outcome & kComparisonTrue) != 0);
+ return t->singleton_true;
}
@@ -500,8 +520,7 @@ Type* Typer::Visitor::Rangify(Type* type, Typer* t) {
DCHECK(std::isnan(max));
return type;
}
- Factory* f = t->isolate()->factory();
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
}
@@ -593,14 +612,33 @@ Bounds Typer::Visitor::TypeStart(Node* node) {
// Common operators.
+Bounds Typer::Visitor::TypeAlways(Node* node) {
+ return Bounds(Type::None(zone()), Type::Boolean(zone()));
+}
+
+
Bounds Typer::Visitor::TypeParameter(Node* node) {
return Bounds::Unbounded(zone());
}
+Bounds Typer::Visitor::TypeOsrValue(Node* node) {
+ if (node->InputAt(0)->opcode() == IrOpcode::kOsrLoopEntry) {
+ // Before deconstruction, OSR values have type {None} to avoid polluting
+ // the types of phis and other nodes in the graph.
+ return Bounds(Type::None(), Type::None());
+ }
+ if (NodeProperties::IsTyped(node)) {
+ // After deconstruction, OSR values may have had a type explicitly set.
+ return NodeProperties::GetBounds(node);
+ }
+ // Otherwise, be conservative.
+ return Bounds::Unbounded(zone());
+}
+
+
Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
- Factory* f = isolate()->factory();
- Handle<Object> number = f->NewNumber(OpParameter<int32_t>(node));
+ double number = OpParameter<int32_t>(node);
return Bounds(Type::Intersect(
Type::Range(number, number, zone()), Type::UntaggedSigned32(), zone()));
}
@@ -664,6 +702,12 @@ Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
}
+Bounds Typer::Visitor::TypeEffectSet(Node* node) {
+ UNREACHABLE();
+ return Bounds();
+}
+
+
Bounds Typer::Visitor::TypeValueEffect(Node* node) {
UNREACHABLE();
return Bounds();
@@ -761,26 +805,41 @@ Type* Typer::Visitor::JSStrictNotEqualTyper(Type* lhs, Type* rhs, Typer* t) {
// (<, <=, >=, >) with the help of a single abstract one. It behaves like <
// but returns undefined when the inputs cannot be compared.
// We implement the typing analogously.
-Type* Typer::Visitor::JSCompareTyper(Type* lhs, Type* rhs, Typer* t) {
+Typer::Visitor::ComparisonOutcome Typer::Visitor::JSCompareTyper(Type* lhs,
+ Type* rhs,
+ Typer* t) {
lhs = ToPrimitive(lhs, t);
rhs = ToPrimitive(rhs, t);
if (lhs->Maybe(Type::String()) && rhs->Maybe(Type::String())) {
- return Type::Boolean();
+ return ComparisonOutcome(kComparisonTrue) |
+ ComparisonOutcome(kComparisonFalse);
}
lhs = ToNumber(lhs, t);
rhs = ToNumber(rhs, t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::Undefined();
+
+ // Shortcut for NaNs.
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return kComparisonUndefined;
+
+ ComparisonOutcome result;
if (lhs->IsConstant() && rhs->Is(lhs)) {
- // Types are equal and are inhabited only by a single semantic value,
- // which is not NaN due to the previous check.
- return t->singleton_false;
+ // Types are equal and are inhabited only by a single semantic value.
+ result = kComparisonFalse;
+ } else if (lhs->Min() >= rhs->Max()) {
+ result = kComparisonFalse;
+ } else if (lhs->Max() < rhs->Min()) {
+ result = kComparisonTrue;
+ } else {
+ // We cannot figure out the result, return both true and false. (We do not
+ // have to return undefined because that cannot affect the result of
+ // FalsifyUndefined.)
+ return ComparisonOutcome(kComparisonTrue) |
+ ComparisonOutcome(kComparisonFalse);
}
- if (lhs->Min() >= rhs->Max()) return t->singleton_false;
- if (lhs->Max() < rhs->Min() &&
- !lhs->Maybe(Type::NaN()) && !rhs->Maybe(Type::NaN())) {
- return t->singleton_true;
+ // Add the undefined if we could see NaN.
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+ result |= kComparisonUndefined;
}
- return Type::Boolean();
+ return result;
}
@@ -809,7 +868,6 @@ Type* Typer::Visitor::JSGreaterThanOrEqualTyper(
Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
- Factory* f = t->isolate()->factory();
lhs = NumberToInt32(ToNumber(lhs, t), t);
rhs = NumberToInt32(ToNumber(rhs, t), t);
double lmin = lhs->Min();
@@ -837,13 +895,12 @@ Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
// value.
max = std::min(max, -1.0);
}
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
// TODO(neis): Be precise for singleton inputs, here and elsewhere.
}
Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
- Factory* f = t->isolate()->factory();
lhs = NumberToInt32(ToNumber(lhs, t), t);
rhs = NumberToInt32(ToNumber(rhs, t), t);
double lmin = lhs->Min();
@@ -865,7 +922,7 @@ Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
min = 0;
max = std::min(max, rmax);
}
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
}
@@ -878,12 +935,12 @@ Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
double rmax = rhs->Max();
if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
// Xor-ing negative or non-negative values results in a non-negative value.
- return Type::NonNegativeSigned32();
+ return Type::Unsigned31();
}
if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
// Xor-ing a negative and a non-negative value results in a negative value.
// TODO(jarin) Use a range here.
- return Type::NegativeSigned32();
+ return Type::Negative32();
}
return Type::Signed32();
}
@@ -903,11 +960,17 @@ Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
// Right-shifting a non-negative value cannot make it negative, nor larger.
min = std::max(min, 0.0);
max = std::min(max, lhs->Max());
+ if (rhs->Min() > 0 && rhs->Max() <= 31) {
+ max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
+ }
}
if (lhs->Max() < 0) {
// Right-shifting a negative value cannot make it non-negative, nor smaller.
min = std::max(min, lhs->Min());
max = std::min(max, -1.0);
+ if (rhs->Min() > 0 && rhs->Max() <= 31) {
+ min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
+ }
}
if (rhs->Min() > 0 && rhs->Max() <= 31) {
// Right-shifting by a positive value yields a small integer value.
@@ -919,8 +982,7 @@ Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
// TODO(jarin) Ideally, the following micro-optimization should be performed
// by the type constructor.
if (max != Type::Signed32()->Max() || min != Type::Signed32()->Min()) {
- Factory* f = t->isolate()->factory();
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
}
return Type::Signed32();
}
@@ -928,11 +990,8 @@ Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
Type* Typer::Visitor::JSShiftRightLogicalTyper(Type* lhs, Type* rhs, Typer* t) {
lhs = NumberToUint32(ToNumber(lhs, t), t);
- Factory* f = t->isolate()->factory();
// Logical right-shifting any value cannot make it larger.
- Handle<Object> min = f->NewNumber(0);
- Handle<Object> max = f->NewNumber(lhs->Max());
- return Type::Range(min, max, t->zone());
+ return Type::Range(0.0, lhs->Max(), t->zone());
}
@@ -974,10 +1033,10 @@ static double array_max(double a[], size_t n) {
Type* Typer::Visitor::JSAddRanger(Type::RangeType* lhs, Type::RangeType* rhs,
Typer* t) {
double results[4];
- results[0] = lhs->Min()->Number() + rhs->Min()->Number();
- results[1] = lhs->Min()->Number() + rhs->Max()->Number();
- results[2] = lhs->Max()->Number() + rhs->Min()->Number();
- results[3] = lhs->Max()->Number() + rhs->Max()->Number();
+ results[0] = lhs->Min() + rhs->Min();
+ results[1] = lhs->Min() + rhs->Max();
+ results[2] = lhs->Max() + rhs->Min();
+ results[3] = lhs->Max() + rhs->Max();
// Since none of the inputs can be -0, the result cannot be -0 either.
// However, it can be nan (the sum of two infinities of opposite sign).
// On the other hand, if none of the "results" above is nan, then the actual
@@ -987,9 +1046,8 @@ Type* Typer::Visitor::JSAddRanger(Type::RangeType* lhs, Type::RangeType* rhs,
if (std::isnan(results[i])) ++nans;
}
if (nans == 4) return Type::NaN(); // [-inf..-inf] + [inf..inf] or vice versa
- Factory* f = t->isolate()->factory();
- Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
- f->NewNumber(array_max(results, 4)), t->zone());
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
// Examples:
// [-inf, -inf] + [+inf, +inf] = NaN
@@ -1023,10 +1081,10 @@ Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
Type* Typer::Visitor::JSSubtractRanger(Type::RangeType* lhs,
Type::RangeType* rhs, Typer* t) {
double results[4];
- results[0] = lhs->Min()->Number() - rhs->Min()->Number();
- results[1] = lhs->Min()->Number() - rhs->Max()->Number();
- results[2] = lhs->Max()->Number() - rhs->Min()->Number();
- results[3] = lhs->Max()->Number() - rhs->Max()->Number();
+ results[0] = lhs->Min() - rhs->Min();
+ results[1] = lhs->Min() - rhs->Max();
+ results[2] = lhs->Max() - rhs->Min();
+ results[3] = lhs->Max() - rhs->Max();
// Since none of the inputs can be -0, the result cannot be -0.
// However, it can be nan (the subtraction of two infinities of same sign).
// On the other hand, if none of the "results" above is nan, then the actual
@@ -1036,9 +1094,8 @@ Type* Typer::Visitor::JSSubtractRanger(Type::RangeType* lhs,
if (std::isnan(results[i])) ++nans;
}
if (nans == 4) return Type::NaN(); // [inf..inf] - [inf..inf] (all same sign)
- Factory* f = t->isolate()->factory();
- Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
- f->NewNumber(array_max(results, 4)), t->zone());
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
// Examples:
// [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
@@ -1062,10 +1119,10 @@ Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
Type::RangeType* rhs, Typer* t) {
double results[4];
- double lmin = lhs->Min()->Number();
- double lmax = lhs->Max()->Number();
- double rmin = rhs->Min()->Number();
- double rmax = rhs->Max()->Number();
+ double lmin = lhs->Min();
+ double lmax = lhs->Max();
+ double rmin = rhs->Min();
+ double rmax = rhs->Max();
results[0] = lmin * rmin;
results[1] = lmin * rmax;
results[2] = lmax * rmin;
@@ -1081,9 +1138,8 @@ Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
if (maybe_nan) return t->weakint; // Giving up.
bool maybe_minuszero = (lhs->Maybe(t->singleton_zero) && rmin < 0) ||
(rhs->Maybe(t->singleton_zero) && lmin < 0);
- Factory* f = t->isolate()->factory();
- Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
- f->NewNumber(array_max(results, 4)), t->zone());
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
: range;
}
@@ -1116,10 +1172,10 @@ Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
Type* Typer::Visitor::JSModulusRanger(Type::RangeType* lhs,
Type::RangeType* rhs, Typer* t) {
- double lmin = lhs->Min()->Number();
- double lmax = lhs->Max()->Number();
- double rmin = rhs->Min()->Number();
- double rmax = rhs->Max()->Number();
+ double lmin = lhs->Min();
+ double lmax = lhs->Max();
+ double rmin = rhs->Min();
+ double rmax = rhs->Max();
double labs = std::max(std::abs(lmin), std::abs(lmax));
double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
@@ -1140,8 +1196,7 @@ Type* Typer::Visitor::JSModulusRanger(Type::RangeType* lhs,
maybe_minus_zero = true;
}
- Factory* f = t->isolate()->factory();
- Type* result = Type::Range(f->NewNumber(omin), f->NewNumber(omax), t->zone());
+ Type* result = Type::Range(omin, omax, t->zone());
if (maybe_minus_zero)
result = Type::Union(result, Type::MinusZero(), t->zone());
return result;
@@ -1248,43 +1303,53 @@ Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
// in the graph. In the current implementation, we are
// increasing the limits to the closest power of two.
Type* Typer::Visitor::Weaken(Type* current_type, Type* previous_type) {
- Type::RangeType* previous = previous_type->GetRange();
- Type::RangeType* current = current_type->GetRange();
- if (previous != NULL && current != NULL) {
- double current_min = current->Min()->Number();
- Handle<Object> new_min = current->Min();
-
- // Find the closest lower entry in the list of allowed
- // minima (or negative infinity if there is no such entry).
- if (current_min != previous->Min()->Number()) {
- new_min = typer_->integer->AsRange()->Min();
- for (const auto val : typer_->weaken_min_limits_) {
- if (val->Number() <= current_min) {
- new_min = val;
- break;
- }
+ // If the types have nothing to do with integers, return the types.
+ if (!current_type->Maybe(typer_->integer) ||
+ !previous_type->Maybe(typer_->integer)) {
+ return current_type;
+ }
+
+ Type* previous_number =
+ Type::Intersect(previous_type, typer_->integer, zone());
+ Type* current_number = Type::Intersect(current_type, typer_->integer, zone());
+ if (!current_number->IsRange() || !previous_number->IsRange()) {
+ return current_type;
+ }
+
+ Type::RangeType* previous = previous_number->AsRange();
+ Type::RangeType* current = current_number->AsRange();
+
+ double current_min = current->Min();
+ double new_min = current_min;
+ // Find the closest lower entry in the list of allowed
+ // minima (or negative infinity if there is no such entry).
+ if (current_min != previous->Min()) {
+ new_min = typer_->integer->AsRange()->Min();
+ for (const auto val : typer_->weaken_min_limits_) {
+ if (val <= current_min) {
+ new_min = val;
+ break;
}
}
+ }
- double current_max = current->Max()->Number();
- Handle<Object> new_max = current->Max();
- // Find the closest greater entry in the list of allowed
- // maxima (or infinity if there is no such entry).
- if (current_max != previous->Max()->Number()) {
- new_max = typer_->integer->AsRange()->Max();
- for (const auto val : typer_->weaken_max_limits_) {
- if (val->Number() >= current_max) {
- new_max = val;
- break;
- }
+ double current_max = current->Max();
+ double new_max = current_max;
+ // Find the closest greater entry in the list of allowed
+ // maxima (or infinity if there is no such entry).
+ if (current_max != previous->Max()) {
+ new_max = typer_->integer->AsRange()->Max();
+ for (const auto val : typer_->weaken_max_limits_) {
+ if (val >= current_max) {
+ new_max = val;
+ break;
}
}
-
- return Type::Union(current_type,
- Type::Range(new_min, new_max, typer_->zone()),
- typer_->zone());
}
- return current_type;
+
+ return Type::Union(current_type,
+ Type::Range(new_min, new_max, typer_->zone()),
+ typer_->zone());
}
@@ -1368,40 +1433,45 @@ Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
}
+Bounds Typer::Visitor::WrapContextBoundsForInput(Node* node) {
+ Bounds outer = BoundsOrNone(NodeProperties::GetContextInput(node));
+ if (outer.upper->Is(Type::None())) {
+ return Bounds(Type::None());
+ } else {
+ DCHECK(outer.upper->Maybe(Type::Internal()));
+ return Bounds(Type::Context(outer.upper, zone()));
+ }
+}
+
+
Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+ return WrapContextBoundsForInput(node);
}
Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+ return WrapContextBoundsForInput(node);
}
Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+ return WrapContextBoundsForInput(node);
}
Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+ return WrapContextBoundsForInput(node);
}
Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
// TODO(rossberg): this is probably incorrect
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+ return WrapContextBoundsForInput(node);
}
Bounds Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+ return WrapContextBoundsForInput(node);
}
@@ -1429,6 +1499,16 @@ Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
+ switch (CallRuntimeParametersOf(node->op()).id()) {
+ case Runtime::kInlineIsSmi:
+ case Runtime::kInlineIsNonNegativeSmi:
+ case Runtime::kInlineIsArray:
+ case Runtime::kInlineIsFunction:
+ case Runtime::kInlineIsRegExp:
+ return Bounds(Type::None(zone()), Type::Boolean(zone()));
+ default:
+ break;
+ }
return Bounds::Unbounded(zone());
}
@@ -1452,7 +1532,7 @@ Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
Bounds Typer::Visitor::TypeBooleanToNumber(Node* node) {
- return Bounds(Type::None(zone()), typer_->zero_or_one);
+ return TypeUnaryOp(node, ToNumber);
}
@@ -1506,6 +1586,11 @@ Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
}
+Bounds Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
+ return TypeUnaryOp(node, ToNumber);
+}
+
+
Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
return Bounds(Type::None(zone()), Type::Boolean(zone()));
}
@@ -1608,9 +1693,8 @@ Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
Bounds arg = Operand(node, 0);
// TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::TaggedPointer(), zone()),
- ChangeRepresentation(arg.upper, Type::TaggedPointer(), zone()));
+ return Bounds(ChangeRepresentation(arg.lower, Type::TaggedPointer(), zone()),
+ ChangeRepresentation(arg.upper, Type::TaggedPointer(), zone()));
}
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index b65a9a5aff..a288d060a6 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_TYPER_H_
#define V8_COMPILER_TYPER_H_
-#include "src/v8.h"
-
#include "src/compiler/graph.h"
#include "src/compiler/opcodes.h"
#include "src/types.h"
@@ -21,7 +19,7 @@ class LazyTypeCache;
class Typer {
public:
- explicit Typer(Graph* graph, MaybeHandle<Context> context);
+ Typer(Isolate* isolate, Graph* graph, MaybeHandle<Context> context);
~Typer();
void Run();
@@ -29,12 +27,13 @@ class Typer {
Graph* graph() { return graph_; }
MaybeHandle<Context> context() { return context_; }
Zone* zone() { return graph_->zone(); }
- Isolate* isolate() { return zone()->isolate(); }
+ Isolate* isolate() { return isolate_; }
private:
class Visitor;
class Decorator;
+ Isolate* isolate_;
Graph* graph_;
MaybeHandle<Context> context_;
Decorator* decorator_;
@@ -64,8 +63,8 @@ class Typer {
Type* random_fun_;
LazyTypeCache* cache_;
- ZoneVector<Handle<Object> > weaken_min_limits_;
- ZoneVector<Handle<Object> > weaken_max_limits_;
+ ZoneVector<double> weaken_min_limits_;
+ ZoneVector<double> weaken_max_limits_;
DISALLOW_COPY_AND_ASSIGN(Typer);
};
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index 734b3e8e8e..555570d220 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -50,7 +50,7 @@ ValueNumberingReducer::~ValueNumberingReducer() {}
Reduction ValueNumberingReducer::Reduce(Node* node) {
- if (!node->op()->HasProperty(Operator::kEliminatable)) return NoChange();
+ if (!node->op()->HasProperty(Operator::kIdempotent)) return NoChange();
const size_t hash = HashCode(node);
if (!entries_) {
@@ -135,7 +135,7 @@ void ValueNumberingReducer::Grow() {
Node** const old_entries = entries_;
size_t const old_capacity = capacity_;
capacity_ *= kCapacityToSizeRatio;
- entries_ = zone()->NewArray<Node*>(static_cast<int>(capacity_));
+ entries_ = zone()->NewArray<Node*>(capacity_);
memset(entries_, 0, sizeof(*entries_) * capacity_);
size_ = 0;
size_t const mask = capacity_ - 1;
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 693b414650..9480afb0e2 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -4,20 +4,21 @@
#include "src/compiler/verifier.h"
+#include <algorithm>
#include <deque>
#include <queue>
#include <sstream>
#include <string>
#include "src/bit-vector.h"
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
#include "src/ostreams.h"
@@ -28,29 +29,22 @@ namespace compiler {
static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
- Node::Uses uses = def->uses();
- for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
- if (*it == use) return true;
- }
- return false;
+ auto const uses = def->uses();
+ return std::find(uses.begin(), uses.end(), use) != uses.end();
}
static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
- Node::Inputs inputs = use->inputs();
- for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
- if (*it == def) return true;
- }
- return false;
+ auto const inputs = use->inputs();
+ return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
}
-class Verifier::Visitor : public NullNodeVisitor {
+class Verifier::Visitor {
public:
Visitor(Zone* z, Typing typed) : zone(z), typing(typed) {}
- // Fulfills the PreNodeCallback interface.
- void Pre(Node* node);
+ void Check(Node* node);
Zone* zone;
Typing typing;
@@ -74,51 +68,50 @@ class Verifier::Visitor : public NullNodeVisitor {
void CheckNotTyped(Node* node) {
if (NodeProperties::IsTyped(node)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << " should never have a type";
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " should never have a type";
+ FATAL(str.str().c_str());
}
}
void CheckUpperIs(Node* node, Type* type) {
if (typing == TYPED && !bounds(node).upper->Is(type)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << " upper bound ";
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " upper bound ";
bounds(node).upper->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ FATAL(str.str().c_str());
}
}
void CheckUpperMaybe(Node* node, Type* type) {
if (typing == TYPED && !bounds(node).upper->Maybe(type)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << " upper bound ";
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " upper bound ";
bounds(node).upper->PrintTo(str);
str << " must intersect ";
type->PrintTo(str);
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ FATAL(str.str().c_str());
}
}
void CheckValueInputIs(Node* node, int i, Type* type) {
Node* input = ValueInput(node, i);
if (typing == TYPED && !bounds(input).upper->Is(type)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << "(input @" << i << " = "
- << input->opcode() << ":" << input->op()->mnemonic()
- << ") upper bound ";
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << "(input @" << i << " = " << input->opcode() << ":"
+ << input->op()->mnemonic() << ") upper bound ";
bounds(input).upper->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ FATAL(str.str().c_str());
}
}
};
-void Verifier::Visitor::Pre(Node* node) {
+void Verifier::Visitor::Check(Node* node) {
int value_count = node->op()->ValueInputCount();
int context_count = OperatorProperties::GetContextInputCount(node->op());
int frame_state_count =
@@ -185,6 +178,16 @@ void Verifier::Visitor::Pre(Node* node) {
}
switch (node->opcode()) {
+ case IrOpcode::kAlways:
+ // Always has no inputs.
+ CHECK_EQ(0, input_count);
+ // Always uses are Branch.
+ for (auto use : node->uses()) {
+ CHECK(use->opcode() == IrOpcode::kBranch);
+ }
+ // Type is boolean.
+ CheckUpperIs(node, Type::Boolean());
+ break;
case IrOpcode::kStart:
// Start has no inputs.
CHECK_EQ(0, input_count);
@@ -205,15 +208,15 @@ void Verifier::Visitor::Pre(Node* node) {
UNREACHABLE();
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
- Node::Uses uses = node->uses();
int count_true = 0, count_false = 0;
- for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
- CHECK((*it)->opcode() == IrOpcode::kIfTrue ||
- (*it)->opcode() == IrOpcode::kIfFalse);
- if ((*it)->opcode() == IrOpcode::kIfTrue) ++count_true;
- if ((*it)->opcode() == IrOpcode::kIfFalse) ++count_false;
+ for (auto use : node->uses()) {
+ CHECK(use->opcode() == IrOpcode::kIfTrue ||
+ use->opcode() == IrOpcode::kIfFalse);
+ if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
+ if (use->opcode() == IrOpcode::kIfFalse) ++count_false;
}
- CHECK(count_true == 1 && count_false == 1);
+ CHECK_EQ(1, count_true);
+ CHECK_EQ(1, count_false);
// Type is empty.
CheckNotTyped(node);
break;
@@ -225,6 +228,45 @@ void Verifier::Visitor::Pre(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kSwitch: {
+ // Switch uses are Case and Default.
+ int count_case = 0, count_default = 0;
+ for (auto use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfValue: {
+ for (auto user : node->uses()) {
+ if (user != use && user->opcode() == IrOpcode::kIfValue) {
+ CHECK_NE(OpParameter<int32_t>(use->op()),
+ OpParameter<int32_t>(user->op()));
+ }
+ }
+ ++count_case;
+ break;
+ }
+ case IrOpcode::kIfDefault: {
+ ++count_default;
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ CHECK_LE(1, count_case);
+ CHECK_EQ(1, count_default);
+ CHECK_EQ(node->op()->ControlOutputCount(), count_case + count_default);
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
+ }
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
+ CHECK_EQ(IrOpcode::kSwitch,
+ NodeProperties::GetControlInput(node)->opcode());
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
case IrOpcode::kLoop:
case IrOpcode::kMerge:
CHECK_EQ(control_count, input_count);
@@ -241,11 +283,13 @@ void Verifier::Visitor::Pre(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
- case IrOpcode::kTerminate:
+ case IrOpcode::kOsrNormalEntry:
+ case IrOpcode::kOsrLoopEntry:
+ // Osr entries have
+ CHECK_EQ(1, effect_count);
+ CHECK_EQ(1, control_count);
// Type is empty.
CheckNotTyped(node);
- CHECK_EQ(1, control_count);
- CHECK_EQ(input_count, 1 + effect_count);
break;
// Common operators
@@ -297,9 +341,16 @@ void Verifier::Visitor::Pre(Node* node) {
// Type is considered internal.
CheckUpperIs(node, Type::Internal());
break;
+ case IrOpcode::kOsrValue:
+ // OSR values have a value and a control input.
+ CHECK_EQ(1, control_count);
+ CHECK_EQ(1, input_count);
+ // Type is merged from other values in the graph and could be any.
+ CheckUpperIs(node, Type::Any());
+ break;
case IrOpcode::kProjection: {
// Projection has an input that produces enough values.
- int index = static_cast<int>(OpParameter<size_t>(node->op()));
+ int index = static_cast<int>(ProjectionIndexOf(node->op()));
Node* input = NodeProperties::GetValueInput(node, 0);
CHECK_GT(input->op()->ValueOutputCount(), index);
// Type can be anything.
@@ -341,6 +392,12 @@ void Verifier::Visitor::Pre(Node* node) {
CHECK_EQ(input_count, 1 + effect_count);
break;
}
+ case IrOpcode::kEffectSet: {
+ CHECK_EQ(0, value_count);
+ CHECK_EQ(0, control_count);
+ CHECK_LT(1, effect_count);
+ break;
+ }
case IrOpcode::kValueEffect:
// TODO(rossberg): what are the constraints on these?
break;
@@ -525,6 +582,11 @@ void Verifier::Visitor::Pre(Node* node) {
CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kPlainPrimitiveToNumber:
+ // PlainPrimitive -> Number
+ CheckValueInputIs(node, 0, Type::PlainPrimitive());
+ CheckUpperIs(node, Type::Number());
+ break;
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
@@ -740,10 +802,11 @@ void Verifier::Visitor::Pre(Node* node) {
void Verifier::Run(Graph* graph, Typing typing) {
- Visitor visitor(graph->zone(), typing);
- CHECK_NE(NULL, graph->start());
- CHECK_NE(NULL, graph->end());
- graph->VisitNodeInputsFromEnd(&visitor);
+ CHECK_NOT_NULL(graph->start());
+ CHECK_NOT_NULL(graph->end());
+ Zone zone;
+ Visitor visitor(&zone, typing);
+ for (Node* node : AllNodes(&zone, graph).live) visitor.Check(node);
}
@@ -815,7 +878,7 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
void ScheduleVerifier::Run(Schedule* schedule) {
const size_t count = schedule->BasicBlockCount();
- Zone tmp_zone(schedule->zone()->isolate());
+ Zone tmp_zone;
Zone* zone = &tmp_zone;
BasicBlock* start = schedule->start();
BasicBlockVector* rpo_order = schedule->rpo_order();
@@ -826,15 +889,13 @@ void ScheduleVerifier::Run(Schedule* schedule) {
++b) {
CHECK_EQ((*b), schedule->GetBlockById((*b)->id()));
// All predecessors and successors should be in rpo and in this schedule.
- for (BasicBlock::Predecessors::iterator j = (*b)->predecessors_begin();
- j != (*b)->predecessors_end(); ++j) {
- CHECK_GE((*j)->rpo_number(), 0);
- CHECK_EQ((*j), schedule->GetBlockById((*j)->id()));
+ for (BasicBlock const* predecessor : (*b)->predecessors()) {
+ CHECK_GE(predecessor->rpo_number(), 0);
+ CHECK_EQ(predecessor, schedule->GetBlockById(predecessor->id()));
}
- for (BasicBlock::Successors::iterator j = (*b)->successors_begin();
- j != (*b)->successors_end(); ++j) {
- CHECK_GE((*j)->rpo_number(), 0);
- CHECK_EQ((*j), schedule->GetBlockById((*j)->id()));
+ for (BasicBlock const* successor : (*b)->successors()) {
+ CHECK_GE(successor->rpo_number(), 0);
+ CHECK_EQ(successor, schedule->GetBlockById(successor->id()));
}
}
@@ -846,10 +907,10 @@ void ScheduleVerifier::Run(Schedule* schedule) {
BasicBlock* dom = block->dominator();
if (b == 0) {
// All blocks except start should have a dominator.
- CHECK_EQ(NULL, dom);
+ CHECK_NULL(dom);
} else {
// Check that the immediate dominator appears somewhere before the block.
- CHECK_NE(NULL, dom);
+ CHECK_NOT_NULL(dom);
CHECK_LT(dom->rpo_number(), block->rpo_number());
}
}
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 67b7ba6e71..8c5962fcad 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_VERIFIER_H_
#define V8_COMPILER_VERIFIER_H_
-#include "src/v8.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 0480f9dc90..973bbd1ef0 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -7,7 +7,6 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -249,6 +248,18 @@ class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
} while (0)
+#define ASSEMBLE_MOVX(asm_instr) \
+ do { \
+ if (instr->addressing_mode() != kMode_None) { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ } else if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
+ } else { \
+ __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \
+ } \
+ } while (0)
+
+
#define ASSEMBLE_DOUBLE_BINOP(asm_instr) \
do { \
if (instr->InputAt(1)->IsDoubleRegister()) { \
@@ -533,6 +544,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
// don't emit code for nops.
break;
@@ -796,17 +813,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
break;
case kX64Movsxbl:
- if (instr->addressing_mode() != kMode_None) {
- __ movsxbl(i.OutputRegister(), i.MemoryOperand());
- } else if (instr->InputAt(0)->IsRegister()) {
- __ movsxbl(i.OutputRegister(), i.InputRegister(0));
- } else {
- __ movsxbl(i.OutputRegister(), i.InputOperand(0));
- }
+ ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
- __ movzxbl(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzxbl);
+ __ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
int index = 0;
@@ -819,17 +831,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX64Movsxwl:
- if (instr->addressing_mode() != kMode_None) {
- __ movsxwl(i.OutputRegister(), i.MemoryOperand());
- } else if (instr->InputAt(0)->IsRegister()) {
- __ movsxwl(i.OutputRegister(), i.InputRegister(0));
- } else {
- __ movsxwl(i.OutputRegister(), i.InputOperand(0));
- }
+ ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
- __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
@@ -864,14 +870,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
}
break;
- case kX64Movsxlq: {
- if (instr->InputAt(0)->IsRegister()) {
- __ movsxlq(i.OutputRegister(), i.InputRegister(0));
- } else {
- __ movsxlq(i.OutputRegister(), i.InputOperand(0));
- }
+ case kX64Movsxlq:
+ ASSEMBLE_MOVX(movsxlq);
break;
- }
case kX64Movq:
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
@@ -961,7 +962,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
- __ movsxlq(index, index);
__ movq(Operand(object, index, times_1, 0), value);
__ leaq(index, Operand(object, index, times_1, 0));
SaveFPRegsMode mode =
@@ -1041,27 +1041,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
case kSignedGreaterThan:
__ j(greater, tlabel);
break;
- case kUnorderedLessThan:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThan:
__ j(below, tlabel);
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
__ j(above_equal, tlabel);
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThanOrEqual:
__ j(below_equal, tlabel);
break;
- case kUnorderedGreaterThan:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
@@ -1122,35 +1110,15 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kSignedGreaterThan:
cc = greater;
break;
- case kUnorderedLessThan:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThan:
cc = below;
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
cc = above_equal;
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThanOrEqual:
cc = below_equal;
break;
- case kUnorderedGreaterThan:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThan:
cc = above;
break;
@@ -1168,6 +1136,33 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ X64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmpl(input, Immediate(i.InputInt32(static_cast<int>(index + 0))));
+ __ j(equal, GetLabel(i.InputRpo(static_cast<int>(index + 1))));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ X64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (int32_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
+ __ cmpl(input, Immediate(case_count));
+ __ j(above_equal, GetLabel(i.InputRpo(1)));
+ __ leaq(kScratchRegister, Operand(table));
+ __ jmp(Operand(kScratchRegister, input, times_8, 0));
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
@@ -1196,11 +1191,26 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ } else if (stack_slots > 0) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
+ stack_slots -= frame()->GetOsrStackSlotCount();
+ }
+
if (stack_slots > 0) {
__ subq(rsp, Immediate(stack_slots * kPointerSize));
}
@@ -1209,10 +1219,10 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ addq(rsp, Immediate(stack_slots * kPointerSize));
}
@@ -1232,13 +1242,15 @@ void CodeGenerator::AssembleReturn() {
__ popq(rbp); // Pop caller's frame pointer.
__ ret(0);
}
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
__ movq(rsp, rbp); // Move stack pointer back to frame pointer.
__ popq(rbp); // Pop caller's frame pointer.
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ ret(pop_count * kPointerSize);
+ } else {
+ __ ret(0);
}
}
@@ -1399,6 +1411,13 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dq(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index aba480de42..0689eb9adc 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -15,11 +18,6 @@ class X64OperandGenerator FINAL : public OperandGenerator {
explicit X64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* TempRegister(Register reg) {
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
- }
-
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -35,7 +33,7 @@ class X64OperandGenerator FINAL : public OperandGenerator {
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
Node* base, Node* displacement,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
if (base != NULL) {
@@ -84,7 +82,7 @@ class X64OperandGenerator FINAL : public OperandGenerator {
}
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement64Matcher m(operand, true);
DCHECK(m.matches());
@@ -136,9 +134,9 @@ void InstructionSelector::VisitLoad(Node* node) {
return;
}
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand* inputs[3];
+ InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
@@ -160,8 +158,8 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
- Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
+ InstructionOperand temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
+ Emit(kX64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, rbx),
g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
temps);
return;
@@ -193,15 +191,15 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
- InstructionOperand* value_operand =
+ InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
- Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
+ Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
@@ -245,7 +243,7 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
return;
}
}
- InstructionOperand* length_operand =
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
g.UseRegister(offset), g.TempImmediate(0), length_operand);
@@ -280,7 +278,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand* value_operand =
+ InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
Int32Matcher mlength(length);
@@ -288,16 +286,16 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
if (mlength.HasValue() && moffset.right().HasValue() &&
moffset.right().Value() >= 0 &&
mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, nullptr, g.UseRegister(buffer),
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
g.UseRegister(moffset.left().node()),
g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
value_operand);
return;
}
}
- InstructionOperand* length_operand =
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
g.TempImmediate(0), length_operand, value_operand);
}
@@ -309,9 +307,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
@@ -323,7 +321,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
// mov rax, [rbp-0x10]
// add rax, [rbp-0x10]
// jo label
- InstructionOperand* const input = g.UseRegister(left);
+ InstructionOperand const input = g.UseRegister(left);
inputs[input_count++] = input;
inputs[input_count++] = input;
} else if (g.CanBeImmediate(right)) {
@@ -368,7 +366,15 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kX64And32);
+ X64OperandGenerator g(this);
+ Uint32BinopMatcher m(node);
+ if (m.right().Is(0xff)) {
+ Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
+ } else if (m.right().Is(0xffff)) {
+ Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
+ } else {
+ VisitBinop(this, node, kX64And32);
+ }
}
@@ -460,7 +466,7 @@ void EmitLea(InstructionSelector* selector, InstructionCode opcode,
Node* displacement) {
X64OperandGenerator g(selector);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
@@ -468,7 +474,7 @@ void EmitLea(InstructionSelector* selector, InstructionCode opcode,
DCHECK_NE(0, static_cast<int>(input_count));
DCHECK_GE(arraysize(inputs), input_count);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(result);
opcode = AddressingModeField::encode(mode) | opcode;
@@ -638,7 +644,7 @@ void VisitMulHigh(InstructionSelector* selector, Node* node,
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X64OperandGenerator g(selector);
- InstructionOperand* temps[] = {g.TempRegister(rdx)};
+ InstructionOperand temps[] = {g.TempRegister(rdx)};
selector->Emit(
opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -806,12 +812,22 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Node* value = node->InputAt(0);
if (CanCover(node, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord64Sar:
- case IrOpcode::kWord64Shr: {
+ case IrOpcode::kWord64Sar: {
Int64BinopMatcher m(value);
- if (m.right().Is(32)) {
+ if (m.right().IsInRange(1, 32)) {
Emit(kX64Shr, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.TempImmediate(32));
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ break;
+ }
+ case IrOpcode::kWord64Shl: {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(1, 31)) {
+ Emit(kX64Shl32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
return;
}
break;
@@ -820,7 +836,9 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
break;
}
}
- Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
+ // Otherwise truncation from 64-bit to 32-bit is a no-nop, as 32-bit
+ // operations just ignore the upper 64-bit.
+ Emit(kArchNop, g.DefineAsRegister(node), g.Use(value));
}
@@ -874,7 +892,7 @@ void InstructionSelector::VisitFloat64Div(Node* node) {
void InstructionSelector::VisitFloat64Mod(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand* temps[] = {g.TempRegister(rax)};
+ InstructionOperand temps[] = {g.TempRegister(rax)};
Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
temps);
@@ -938,11 +956,14 @@ void InstructionSelector::VisitCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
+ for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
+ ++i) {
// TODO(titzer): handle pushing double parameters.
- Emit(kX64Push, NULL,
- g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+ InstructionOperand value =
+ g.CanBeImmediate(*i) ? g.UseImmediate(*i) : IsSupported(ATOM)
+ ? g.UseRegister(*i)
+ : g.Use(*i);
+ Emit(kX64Push, g.NoOutput(), value);
}
// Select the appropriate opcode based on the call type.
@@ -962,7 +983,7 @@ void InstructionSelector::VisitCall(Node* node) {
opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
- InstructionOperand** first_output =
+ InstructionOperand* first_output =
buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), first_output,
@@ -973,12 +994,13 @@ void InstructionSelector::VisitCall(Node* node) {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
X64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()),
g.Label(cont->false_block()))->MarkAsControl();
} else {
DCHECK(cont->IsSet());
@@ -1027,11 +1049,12 @@ static void VisitCompareZero(InstructionSelector* selector, Node* node,
}
-// Shared routine for multiple float64 compare operations.
+// Shared routine for multiple float64 compare operations (inputs commuted).
static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
- cont, node->op()->HasProperty(Operator::kCommutative));
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+ VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
@@ -1102,22 +1125,22 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation> is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (result == NULL || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -1150,6 +1173,70 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ X64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+ InstructionOperand default_operand = g.Label(default_branch);
+
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+ // is 2^31-1, so don't assume that it's non-zero below.
+ size_t value_range =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+ // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+ // instruction.
+ size_t table_space_cost = 4 + value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * case_count;
+ size_t lookup_time_cost = case_count;
+ if (case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = g.TempRegister();
+ if (min_value) {
+ // The leal automatically zero extends, so result is a valid 64-bit index.
+ Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-min_value));
+ } else {
+ // Zero extend, because we use it as 64-bit index into the jump table.
+ Emit(kX64Movl, index_operand, value_operand);
+ }
+ size_t input_count = 2 + value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < case_count; ++index) {
+ size_t value = case_values[index] - min_value;
+ BasicBlock* branch = case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+ return;
+ }
+
+ // Generate a sequence of conditional jumps.
+ size_t input_count = 2 + case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = default_operand;
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t value = case_values[index];
+ BasicBlock* branch = case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* user = node;
FlagsContinuation cont(kEqual, node);
@@ -1247,7 +1334,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
VisitBinop(this, node, kX64Add32, &cont);
}
@@ -1257,7 +1344,7 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kX64Sub32, &cont);
}
@@ -1291,13 +1378,13 @@ void InstructionSelector::VisitFloat64Equal(Node* node) {
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
diff --git a/deps/v8/src/compiler/x64/linkage-x64.cc b/deps/v8/src/compiler/x64/linkage-x64.cc
index 0b76cc783c..802edf3554 100644
--- a/deps/v8/src/compiler/x64/linkage-x64.cc
+++ b/deps/v8/src/compiler/x64/linkage-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
@@ -49,30 +47,32 @@ struct X64LinkageHelperTraits {
typedef LinkageHelper<X64LinkageHelperTraits> LH;
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+ return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
+ return LH::GetStubCallDescriptor(isolate, zone, descriptor,
+ stack_parameter_count, flags, properties);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
+ const MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
diff --git a/deps/v8/src/compiler/zone-pool.cc b/deps/v8/src/compiler/zone-pool.cc
index 179988d418..2006a79d2c 100644
--- a/deps/v8/src/compiler/zone-pool.cc
+++ b/deps/v8/src/compiler/zone-pool.cc
@@ -65,8 +65,7 @@ void ZonePool::StatsScope::ZoneReturned(Zone* zone) {
}
-ZonePool::ZonePool(Isolate* isolate)
- : isolate_(isolate), max_allocated_bytes_(0), total_deleted_bytes_(0) {}
+ZonePool::ZonePool() : max_allocated_bytes_(0), total_deleted_bytes_(0) {}
ZonePool::~ZonePool() {
@@ -104,10 +103,10 @@ Zone* ZonePool::NewEmptyZone() {
zone = unused_.back();
unused_.pop_back();
} else {
- zone = new Zone(isolate_);
+ zone = new Zone();
}
used_.push_back(zone);
- DCHECK_EQ(0, zone->allocation_size());
+ DCHECK_EQ(0u, zone->allocation_size());
return zone;
}
@@ -130,7 +129,7 @@ void ZonePool::ReturnZone(Zone* zone) {
delete zone;
} else {
zone->DeleteAll();
- DCHECK_EQ(0, zone->allocation_size());
+ DCHECK_EQ(0u, zone->allocation_size());
unused_.push_back(zone);
}
}
diff --git a/deps/v8/src/compiler/zone-pool.h b/deps/v8/src/compiler/zone-pool.h
index 8b43265c1a..09d6183e00 100644
--- a/deps/v8/src/compiler/zone-pool.h
+++ b/deps/v8/src/compiler/zone-pool.h
@@ -9,7 +9,7 @@
#include <set>
#include <vector>
-#include "src/v8.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -60,7 +60,7 @@ class ZonePool FINAL {
DISALLOW_COPY_AND_ASSIGN(StatsScope);
};
- explicit ZonePool(Isolate* isolate);
+ ZonePool();
~ZonePool();
size_t GetMaxAllocatedBytes();
@@ -76,7 +76,6 @@ class ZonePool FINAL {
typedef std::vector<Zone*> Used;
typedef std::vector<StatsScope*> Stats;
- Isolate* const isolate_;
Unused unused_;
Used used_;
Stats stats_;
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 57490a118f..6537e2c3e2 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -173,10 +173,6 @@ static void GetAttributesAndBindingFlags(VariableMode mode,
? IMMUTABLE_CHECK_INITIALIZED_HARMONY
: IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
- case MODULE:
- *attributes = READ_ONLY;
- *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
- break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index cd3ff14211..f932c60092 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -100,8 +100,6 @@ enum BindingFlags {
V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
V(TO_LENGTH_FUN_INDEX, JSFunction, to_length_fun) \
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
- V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
@@ -140,7 +138,7 @@ enum BindingFlags {
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
- V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
+ V(FUNCTION_CACHE_INDEX, FixedArray, function_cache) \
V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
@@ -348,8 +346,6 @@ class Context: public FixedArray {
TO_INT32_FUN_INDEX,
TO_BOOLEAN_FUN_INDEX,
GLOBAL_EVAL_FUN_INDEX,
- INSTANTIATE_FUN_INDEX,
- CONFIGURE_INSTANCE_FUN_INDEX,
ARRAY_BUFFER_FUN_INDEX,
UINT8_ARRAY_FUN_INDEX,
INT8_ARRAY_FUN_INDEX,
@@ -572,20 +568,21 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static int FunctionMapIndex(StrictMode strict_mode, FunctionKind kind) {
+ static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
if (IsGeneratorFunction(kind)) {
- return strict_mode == SLOPPY ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
- : STRICT_GENERATOR_FUNCTION_MAP_INDEX;
+ return is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
+ : SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
}
- if (IsArrowFunction(kind) || IsConciseMethod(kind)) {
- return strict_mode == SLOPPY
- ? SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX
- : STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+ if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
+ IsAccessorFunction(kind)) {
+ return is_strict(language_mode)
+ ? STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX
+ : SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
}
- return strict_mode == SLOPPY ? SLOPPY_FUNCTION_MAP_INDEX
- : STRICT_FUNCTION_MAP_INDEX;
+ return is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
+ : SLOPPY_FUNCTION_MAP_INDEX;
}
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 972bd6862c..8cae329f52 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -47,8 +47,11 @@ void HistogramTimer::Start() {
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (Enabled()) {
- // Compute the delta between start and stop, in milliseconds.
- AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
+ int64_t sample = resolution_ == MICROSECOND
+ ? timer_.Elapsed().InMicroseconds()
+ : timer_.Elapsed().InMilliseconds();
+ // Compute the delta between start and stop, in microseconds.
+ AddSample(static_cast<int>(sample));
timer_.Stop();
}
Logger::CallEventLogger(isolate(), name(), Logger::END, true);
@@ -61,11 +64,16 @@ Counters::Counters(Isolate* isolate) {
HISTOGRAM_RANGE_LIST(HR)
#undef HR
-#define HT(name, caption) \
- name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate);
+#define HT(name, caption, max, res) \
+ name##_ = HistogramTimer(#caption, 0, max, HistogramTimer::res, 50, isolate);
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define AHT(name, caption) \
+ name##_ = AggregatableHistogramTimer(#caption, 0, 10000000, 50, isolate);
+ AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
+#undef AHT
+
#define HP(name, caption) \
name##_ = Histogram(#caption, 0, 101, 100, isolate);
HISTOGRAM_PERCENTAGE_LIST(HP)
@@ -152,10 +160,14 @@ void Counters::ResetHistograms() {
HISTOGRAM_RANGE_LIST(HR)
#undef HR
-#define HT(name, caption) name##_.Reset();
+#define HT(name, caption, max, res) name##_.Reset();
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define AHT(name, caption) name##_.Reset();
+ AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
+#undef AHT
+
#define HP(name, caption) name##_.Reset();
HISTOGRAM_PERCENTAGE_LIST(HP)
#undef HP
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 41107cfc33..539b182915 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -8,6 +8,7 @@
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/time.h"
#include "src/globals.h"
#include "src/objects.h"
@@ -223,13 +224,16 @@ class Histogram {
// A HistogramTimer allows distributions of results to be created.
class HistogramTimer : public Histogram {
public:
- HistogramTimer() { }
- HistogramTimer(const char* name,
- int min,
- int max,
- int num_buckets,
- Isolate* isolate)
- : Histogram(name, min, max, num_buckets, isolate) {}
+ enum Resolution {
+ MILLISECOND,
+ MICROSECOND
+ };
+
+ HistogramTimer() {}
+ HistogramTimer(const char* name, int min, int max, Resolution resolution,
+ int num_buckets, Isolate* isolate)
+ : Histogram(name, min, max, num_buckets, isolate),
+ resolution_(resolution) {}
// Start the timer.
void Start();
@@ -249,6 +253,7 @@ class HistogramTimer : public Histogram {
private:
base::ElapsedTimer timer_;
+ Resolution resolution_;
};
// Helper class for scoping a HistogramTimer.
@@ -291,30 +296,104 @@ class HistogramTimerScope BASE_EMBEDDED {
#endif
};
+
+// A histogram timer that can aggregate events within a larger scope.
+//
+// Intended use of this timer is to have an outer (aggregating) and an inner
+// (to be aggregated) scope, where the inner scope measure the time of events,
+// and all those inner scope measurements will be summed up by the outer scope.
+// An example use might be to aggregate the time spent in lazy compilation
+// while running a script.
+//
+// Helpers:
+// - AggregatingHistogramTimerScope, the "outer" scope within which
+// times will be summed up.
+// - AggregatedHistogramTimerScope, the "inner" scope which defines the
+// events to be timed.
+class AggregatableHistogramTimer : public Histogram {
+ public:
+ AggregatableHistogramTimer() {}
+ AggregatableHistogramTimer(const char* name, int min, int max,
+ int num_buckets, Isolate* isolate)
+ : Histogram(name, min, max, num_buckets, isolate) {}
+
+ // Start/stop the "outer" scope.
+ void Start() { time_ = base::TimeDelta(); }
+ void Stop() { AddSample(static_cast<int>(time_.InMicroseconds())); }
+
+ // Add a time value ("inner" scope).
+ void Add(base::TimeDelta other) { time_ += other; }
+
+ private:
+ base::TimeDelta time_;
+};
+
+
+// A helper class for use with AggregatableHistogramTimer.
+class AggregatingHistogramTimerScope {
+ public:
+ explicit AggregatingHistogramTimerScope(AggregatableHistogramTimer* histogram)
+ : histogram_(histogram) {
+ histogram_->Start();
+ }
+ ~AggregatingHistogramTimerScope() { histogram_->Stop(); }
+
+ private:
+ AggregatableHistogramTimer* histogram_;
+};
+
+
+// A helper class for use with AggregatableHistogramTimer.
+class AggregatedHistogramTimerScope {
+ public:
+ explicit AggregatedHistogramTimerScope(AggregatableHistogramTimer* histogram)
+ : histogram_(histogram) {
+ timer_.Start();
+ }
+ ~AggregatedHistogramTimerScope() { histogram_->Add(timer_.Elapsed()); }
+
+ private:
+ base::ElapsedTimer timer_;
+ AggregatableHistogramTimer* histogram_;
+};
+
+
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms */ \
+ HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
HR(gc_idle_time_allotted_in_ms, V8.GCIdleTimeAllottedInMS, 0, 10000, 101) \
HR(gc_idle_time_limit_overshot, V8.GCIdleTimeLimit.Overshot, 0, 10000, 101) \
- HR(gc_idle_time_limit_undershot, V8.GCIdleTimeLimit.Undershot, 0, 10000, 101)
-
-#define HISTOGRAM_TIMER_LIST(HT) \
- /* Garbage collection timers. */ \
- HT(gc_compactor, V8.GCCompactor) \
- HT(gc_scavenger, V8.GCScavenger) \
- HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
- HT(gc_idle_notification, V8.GCIdleNotification) \
- HT(gc_incremental_marking, V8.GCIncrementalMarking) \
- HT(gc_low_memory_notification, V8.GCLowMemoryNotification) \
- /* Parsing timers. */ \
- HT(parse, V8.Parse) \
- HT(parse_lazy, V8.ParseLazy) \
- HT(pre_parse, V8.PreParse) \
- /* Total compilation times. */ \
- HT(compile, V8.Compile) \
- HT(compile_eval, V8.CompileEval) \
- /* Serialization as part of compilation (code caching) */ \
- HT(compile_serialize, V8.CompileSerialize) \
- HT(compile_deserialize, V8.CompileDeserialize)
+ HR(gc_idle_time_limit_undershot, V8.GCIdleTimeLimit.Undershot, 0, 10000, \
+ 101) \
+ HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)
+
+#define HISTOGRAM_TIMER_LIST(HT) \
+ /* Garbage collection timers. */ \
+ HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
+ HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
+ HT(gc_context, V8.GCContext, 10000, \
+ MILLISECOND) /* GC context cleanup time */ \
+ HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
+ HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
+ HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
+ MILLISECOND) \
+ /* Parsing timers. */ \
+ HT(parse, V8.ParseMicroSeconds, 1000000, MICROSECOND) \
+ HT(parse_lazy, V8.ParseLazyMicroSeconds, 1000000, MICROSECOND) \
+ HT(pre_parse, V8.PreParseMicroSeconds, 1000000, MICROSECOND) \
+ /* Compilation times. */ \
+ HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
+ HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
+ /* Serialization as part of compilation (code caching) */ \
+ HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND) \
+ HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000, \
+ MICROSECOND) \
+ /* Total compilation time incl. caching/parsing */ \
+ HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND)
+
+
+#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
+ AHT(compile_lazy, V8.CompileLazyMicroSeconds)
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
@@ -562,11 +641,16 @@ class Counters {
HISTOGRAM_RANGE_LIST(HR)
#undef HR
-#define HT(name, caption) \
+#define HT(name, caption, max, res) \
HistogramTimer* name() { return &name##_; }
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define AHT(name, caption) \
+ AggregatableHistogramTimer* name() { return &name##_; }
+ AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
+#undef AHT
+
#define HP(name, caption) \
Histogram* name() { return &name##_; }
HISTOGRAM_PERCENTAGE_LIST(HP)
@@ -614,9 +698,12 @@ class Counters {
#undef SC
enum Id {
-#define RATE_ID(name, caption) k_##name,
+#define RATE_ID(name, caption, max, res) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
#undef RATE_ID
+#define AGGREGATABLE_ID(name, caption) k_##name,
+ AGGREGATABLE_HISTOGRAM_TIMER_LIST(AGGREGATABLE_ID)
+#undef AGGREGATABLE_ID
#define PERCENTAGE_ID(name, caption) k_##name,
HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID)
#undef PERCENTAGE_ID
@@ -653,11 +740,15 @@ class Counters {
HISTOGRAM_RANGE_LIST(HR)
#undef HR
-#define HT(name, caption) \
- HistogramTimer name##_;
+#define HT(name, caption, max, res) HistogramTimer name##_;
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define AHT(name, caption) \
+ AggregatableHistogramTimer name##_;
+ AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
+#undef AHT
+
#define HP(name, caption) \
Histogram name##_;
HISTOGRAM_PERCENTAGE_LIST(HP)
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index c63a9c3cc2..0320ed5a79 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -36,6 +36,12 @@ void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
+void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ CodeEntry* entry = code_map->FindEntry(start);
+ if (entry != NULL) entry->set_deopt_info(deopt_reason, raw_position);
+}
+
+
void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->MoveCode(from, to);
}
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 456770b4c6..c276bb6d60 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -7,6 +7,7 @@
#include "src/cpu-profiler-inl.h"
#include "src/compiler.h"
+#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/hashmap.h"
#include "src/log-inl.h"
@@ -38,6 +39,19 @@ void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
}
+void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
+ int fp_to_sp_delta) {
+ TickSampleEventRecord record(last_code_event_id_);
+ RegisterState regs;
+ Address fp = isolate->c_entry_fp(isolate->thread_local_top());
+ regs.sp = fp - fp_to_sp_delta;
+ regs.fp = fp;
+ regs.pc = from;
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+ ticks_from_vm_buffer_.Enqueue(record);
+}
+
+
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
TickSampleEventRecord record(last_code_event_id_);
RegisterState regs;
@@ -329,6 +343,19 @@ void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
}
+void CpuProfiler::CodeDeoptEvent(Code* code, int bailout_id, Address pc,
+ int fp_to_sp_delta) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
+ CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, bailout_id);
+ rec->start = code->address();
+ rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
+ rec->raw_position = info.raw_position;
+ processor_->Enqueue(evt_rec);
+ processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
+}
+
+
void CpuProfiler::CodeDeleteEvent(Address from) {
}
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 4dc5643aa1..140de3b906 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -23,12 +23,13 @@ class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
-#define CODE_EVENTS_TYPE_LIST(V) \
- V(CODE_CREATION, CodeCreateEventRecord) \
- V(CODE_MOVE, CodeMoveEventRecord) \
- V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
- V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
- V(REPORT_BUILTIN, ReportBuiltinEventRecord)
+#define CODE_EVENTS_TYPE_LIST(V) \
+ V(CODE_CREATION, CodeCreateEventRecord) \
+ V(CODE_MOVE, CodeMoveEventRecord) \
+ V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
+ V(CODE_DEOPT, CodeDeoptEventRecord) \
+ V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
+ V(REPORT_BUILTIN, ReportBuiltinEventRecord)
class CodeEventRecord {
@@ -75,6 +76,16 @@ class CodeDisableOptEventRecord : public CodeEventRecord {
};
+class CodeDeoptEventRecord : public CodeEventRecord {
+ public:
+ Address start;
+ const char* deopt_reason;
+ int raw_position;
+
+ INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
public:
Address from;
@@ -137,6 +148,7 @@ class ProfilerEventsProcessor : public base::Thread {
// Puts current stack into tick sample events buffer.
void AddCurrentStack(Isolate* isolate);
+ void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -233,6 +245,8 @@ class CpuProfiler : public CodeEventListener {
virtual void CodeMovingGCEvent() {}
virtual void CodeMoveEvent(Address from, Address to);
virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
+ virtual void CodeDeoptEvent(Code* code, int bailout_id, Address pc,
+ int fp_to_sp_delta);
virtual void CodeDeleteEvent(Address from);
virtual void GetterCallbackEvent(Name* name, Address entry_point);
virtual void RegExpCodeCreateEvent(Code* code, String* source);
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 7157cb81ac..7fa6f8c42e 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -49,6 +49,10 @@
#include "src/v8.h"
#endif // !V8_SHARED
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+#include "src/startup-data-util.h"
+#endif // V8_USE_EXTERNAL_STARTUP_DATA
+
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
@@ -62,6 +66,10 @@
#define DCHECK(condition) assert(condition)
#endif
+#ifndef CHECK
+#define CHECK(condition) assert(condition)
+#endif
+
namespace v8 {
@@ -221,14 +229,17 @@ ScriptCompiler::CachedData* CompileForCachedData(
// Compile a string within the current v8 context.
-Local<UnboundScript> Shell::CompileString(
+Local<Script> Shell::CompileString(
Isolate* isolate, Local<String> source, Local<Value> name,
- ScriptCompiler::CompileOptions compile_options) {
+ ScriptCompiler::CompileOptions compile_options, SourceType source_type) {
ScriptOrigin origin(name);
if (compile_options == ScriptCompiler::kNoCompileOptions) {
ScriptCompiler::Source script_source(source, origin);
- return ScriptCompiler::CompileUnbound(isolate, &script_source,
- compile_options);
+ return source_type == SCRIPT
+ ? ScriptCompiler::Compile(isolate, &script_source,
+ compile_options)
+ : ScriptCompiler::CompileModule(isolate, &script_source,
+ compile_options);
}
ScriptCompiler::CachedData* data =
@@ -242,17 +253,20 @@ Local<UnboundScript> Shell::CompileString(
DCHECK(false); // A new compile option?
}
if (data == NULL) compile_options = ScriptCompiler::kNoCompileOptions;
- return ScriptCompiler::CompileUnbound(isolate, &cached_source,
- compile_options);
+ Local<Script> result =
+ source_type == SCRIPT
+ ? ScriptCompiler::Compile(isolate, &cached_source, compile_options)
+ : ScriptCompiler::CompileModule(isolate, &cached_source,
+ compile_options);
+ CHECK(data == NULL || !data->rejected);
+ return result;
}
// Executes a string within the current v8 context.
-bool Shell::ExecuteString(Isolate* isolate,
- Handle<String> source,
- Handle<Value> name,
- bool print_result,
- bool report_exceptions) {
+bool Shell::ExecuteString(Isolate* isolate, Handle<String> source,
+ Handle<Value> name, bool print_result,
+ bool report_exceptions, SourceType source_type) {
#ifndef V8_SHARED
bool FLAG_debugger = i::FLAG_debugger;
#else
@@ -266,61 +280,61 @@ bool Shell::ExecuteString(Isolate* isolate,
try_catch.SetVerbose(true);
}
- Handle<UnboundScript> script =
- Shell::CompileString(isolate, source, name, options.compile_options);
- if (script.IsEmpty()) {
- // Print errors that happened during compilation.
- if (report_exceptions && !FLAG_debugger)
- ReportException(isolate, &try_catch);
- return false;
- } else {
+ Handle<Value> result;
+ {
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm =
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
- realm->Enter();
- Handle<Value> result = script->BindToCurrentContext()->Run();
- realm->Exit();
- data->realm_current_ = data->realm_switch_;
- if (result.IsEmpty()) {
- DCHECK(try_catch.HasCaught());
- // Print errors that happened during execution.
+ Context::Scope context_scope(realm);
+ Handle<Script> script = Shell::CompileString(
+ isolate, source, name, options.compile_options, source_type);
+ if (script.IsEmpty()) {
+ // Print errors that happened during compilation.
if (report_exceptions && !FLAG_debugger)
ReportException(isolate, &try_catch);
return false;
- } else {
- DCHECK(!try_catch.HasCaught());
- if (print_result) {
-#if !defined(V8_SHARED)
- if (options.test_shell) {
-#endif
- if (!result->IsUndefined()) {
- // If all went well and the result wasn't undefined then print
- // the returned value.
- v8::String::Utf8Value str(result);
- fwrite(*str, sizeof(**str), str.length(), stdout);
- printf("\n");
- }
+ }
+ result = script->Run();
+ data->realm_current_ = data->realm_switch_;
+ }
+ if (result.IsEmpty()) {
+ DCHECK(try_catch.HasCaught());
+ // Print errors that happened during execution.
+ if (report_exceptions && !FLAG_debugger)
+ ReportException(isolate, &try_catch);
+ return false;
+ }
+ DCHECK(!try_catch.HasCaught());
+ if (print_result) {
#if !defined(V8_SHARED)
- } else {
- v8::TryCatch try_catch;
- v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(isolate, utility_context_);
- v8::Context::Scope context_scope(context);
- Handle<Object> global = context->Global();
- Handle<Value> fun =
- global->Get(String::NewFromUtf8(isolate, "Stringify"));
- Handle<Value> argv[1] = { result };
- Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
- if (try_catch.HasCaught()) return true;
- v8::String::Utf8Value str(s);
- fwrite(*str, sizeof(**str), str.length(), stdout);
- printf("\n");
- }
+ if (options.test_shell) {
#endif
+ if (!result->IsUndefined()) {
+ // If all went well and the result wasn't undefined then print
+ // the returned value.
+ v8::String::Utf8Value str(result);
+ fwrite(*str, sizeof(**str), str.length(), stdout);
+ printf("\n");
}
- return true;
+#if !defined(V8_SHARED)
+ } else {
+ v8::TryCatch try_catch;
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate, utility_context_);
+ v8::Context::Scope context_scope(context);
+ Handle<Object> global = context->Global();
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "Stringify"));
+ Handle<Value> argv[1] = {result};
+ Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
+ if (try_catch.HasCaught()) return true;
+ v8::String::Utf8Value str(s);
+ fwrite(*str, sizeof(**str), str.length(), stdout);
+ printf("\n");
}
+#endif
}
+ return true;
}
@@ -824,7 +838,20 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
}
+class NoUseStrongForUtilityScriptScope {
+ public:
+ NoUseStrongForUtilityScriptScope() : flag_(i::FLAG_use_strong) {
+ i::FLAG_use_strong = false;
+ }
+ ~NoUseStrongForUtilityScriptScope() { i::FLAG_use_strong = flag_; }
+
+ private:
+ bool flag_;
+};
+
+
void Shell::InstallUtilityScript(Isolate* isolate) {
+ NoUseStrongForUtilityScriptScope no_use_strong;
HandleScope scope(isolate);
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
@@ -1189,6 +1216,7 @@ void SourceGroup::Execute(Isolate* isolate) {
bool exception_was_thrown = false;
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
+ Shell::SourceType source_type = Shell::SCRIPT;
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
HandleScope handle_scope(isolate);
@@ -1199,21 +1227,28 @@ void SourceGroup::Execute(Isolate* isolate) {
break;
}
++i;
+ continue;
+ } else if (strcmp(arg, "--module") == 0 && i + 1 < end_offset_) {
+ // Treat the next file as a module.
+ source_type = Shell::MODULE;
+ arg = argv_[++i];
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
- } else {
- // Use all other arguments as names of files to load and run.
- HandleScope handle_scope(isolate);
- Handle<String> file_name = String::NewFromUtf8(isolate, arg);
- Handle<String> source = ReadFile(isolate, arg);
- if (source.IsEmpty()) {
- printf("Error reading '%s'\n", arg);
- Shell::Exit(1);
- }
- if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
- exception_was_thrown = true;
- break;
- }
+ continue;
+ }
+
+ // Use all other arguments as names of files to load and run.
+ HandleScope handle_scope(isolate);
+ Handle<String> file_name = String::NewFromUtf8(isolate, arg);
+ Handle<String> source = ReadFile(isolate, arg);
+ if (source.IsEmpty()) {
+ printf("Error reading '%s'\n", arg);
+ Shell::Exit(1);
+ }
+ if (!Shell::ExecuteString(isolate, source, file_name, false, true,
+ source_type)) {
+ exception_was_thrown = true;
+ break;
}
}
if (exception_was_thrown != Shell::options.expected_to_throw) {
@@ -1398,6 +1433,8 @@ bool Shell::SetOptions(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ bool enable_harmony_modules = false;
+
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
SourceGroup* current = options.isolate_sources;
@@ -1408,6 +1445,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
current->End(i);
current++;
current->Begin(argv, i + 1);
+ } else if (strcmp(str, "--module") == 0) {
+ // Pass on to SourceGroup, which understands this option.
+ enable_harmony_modules = true;
} else if (strncmp(argv[i], "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
}
@@ -1418,6 +1458,10 @@ bool Shell::SetOptions(int argc, char* argv[]) {
SetFlagsFromString("--nologfile_per_isolate");
}
+ if (enable_harmony_modules) {
+ SetFlagsFromString("--harmony-modules");
+ }
+
return true;
}
@@ -1539,96 +1583,12 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
- void* Allocate(size_t) OVERRIDE { return malloc(0); }
- void* AllocateUninitialized(size_t length) OVERRIDE { return malloc(0); }
+ void* Allocate(size_t) OVERRIDE { return malloc(1); }
+ void* AllocateUninitialized(size_t length) OVERRIDE { return malloc(1); }
void Free(void* p, size_t) OVERRIDE { free(p); }
};
-#ifdef V8_USE_EXTERNAL_STARTUP_DATA
-class StartupDataHandler {
- public:
- StartupDataHandler(const char* exec_path, const char* natives_blob,
- const char* snapshot_blob) {
- // If we have (at least one) explicitly given blob, use those.
- // If not, use the default blob locations next to the d8 binary.
- if (natives_blob || snapshot_blob) {
- LoadFromFiles(natives_blob, snapshot_blob);
- } else {
- char* natives;
- char* snapshot;
- LoadFromFiles(RelativePath(&natives, exec_path, "natives_blob.bin"),
- RelativePath(&snapshot, exec_path, "snapshot_blob.bin"));
-
- free(natives);
- free(snapshot);
- }
- }
-
- ~StartupDataHandler() {
- delete[] natives_.data;
- delete[] snapshot_.data;
- }
-
- private:
- static char* RelativePath(char** buffer, const char* exec_path,
- const char* name) {
- DCHECK(exec_path);
- const char* last_slash = strrchr(exec_path, '/');
- if (last_slash) {
- int after_slash = last_slash - exec_path + 1;
- int name_length = static_cast<int>(strlen(name));
- *buffer =
- reinterpret_cast<char*>(calloc(after_slash + name_length + 1, 1));
- strncpy(*buffer, exec_path, after_slash);
- strncat(*buffer, name, name_length);
- } else {
- *buffer = strdup(name);
- }
- return *buffer;
- }
-
- void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) {
- Load(natives_blob, &natives_, v8::V8::SetNativesDataBlob);
- Load(snapshot_blob, &snapshot_, v8::V8::SetSnapshotDataBlob);
- }
-
- void Load(const char* blob_file,
- v8::StartupData* startup_data,
- void (*setter_fn)(v8::StartupData*)) {
- startup_data->data = NULL;
- startup_data->raw_size = 0;
-
- if (!blob_file)
- return;
-
- FILE* file = fopen(blob_file, "rb");
- if (!file)
- return;
-
- fseek(file, 0, SEEK_END);
- startup_data->raw_size = ftell(file);
- rewind(file);
-
- startup_data->data = new char[startup_data->raw_size];
- int read_size =
- static_cast<int>(fread(const_cast<char*>(startup_data->data), 1,
- startup_data->raw_size, file));
- fclose(file);
-
- if (startup_data->raw_size == read_size) (*setter_fn)(startup_data);
- }
-
- v8::StartupData natives_;
- v8::StartupData snapshot_;
-
- // Disallow copy & assign.
- StartupDataHandler(const StartupDataHandler& other);
- void operator=(const StartupDataHandler& other);
-};
-#endif // V8_USE_EXTERNAL_STARTUP_DATA
-
-
int Shell::Main(int argc, char* argv[]) {
#if (defined(_WIN32) || defined(_WIN64))
UINT new_flags =
@@ -1651,8 +1611,8 @@ int Shell::Main(int argc, char* argv[]) {
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
- StartupDataHandler startup_data(argv[0], options.natives_blob,
- options.snapshot_blob);
+ v8::StartupDataHandler startup_data(argv[0], options.natives_blob,
+ options.snapshot_blob);
#endif
SetFlagsFromString("--trace-hydrogen-file=hydrogen.cfg");
SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index e53658493a..a78551599f 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -49,6 +49,8 @@
],
'sources': [
'd8.cc',
+ 'startup-data-util.h',
+ 'startup-data-util.cc'
],
'conditions': [
[ 'want_separate_host_toolset==1', {
@@ -60,7 +62,7 @@
}],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android" \
- or OS=="qnx")', {
+ or OS=="qnx" or OS=="aix")', {
'sources': [ 'd8-posix.cc', ]
}],
[ 'OS=="win"', {
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index caa5a0adba..90dde1541e 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -248,14 +248,16 @@ class Shell : public i::AllStatic {
#endif // V8_SHARED
public:
- static Local<UnboundScript> CompileString(
+ enum SourceType { SCRIPT, MODULE };
+
+ static Local<Script> CompileString(
Isolate* isolate, Local<String> source, Local<Value> name,
- v8::ScriptCompiler::CompileOptions compile_options);
- static bool ExecuteString(Isolate* isolate,
- Handle<String> source,
- Handle<Value> name,
- bool print_result,
- bool report_exceptions);
+ v8::ScriptCompiler::CompileOptions compile_options,
+ SourceType source_type);
+ static bool ExecuteString(Isolate* isolate, Handle<String> source,
+ Handle<Value> name, bool print_result,
+ bool report_exceptions,
+ SourceType source_type = SCRIPT);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Handle<String> ReadFile(Isolate* isolate, const char* name);
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 6b95cb7212..1f879de135 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -103,8 +103,8 @@ void DateCache::YearMonthDayFromDays(
days += is_leap;
// Check if the date is after February.
- if (days >= 31 + 28 + is_leap) {
- days -= 31 + 28 + is_leap;
+ if (days >= 31 + 28 + BoolToInt(is_leap)) {
+ days -= 31 + 28 + BoolToInt(is_leap);
// Find the date starting from March.
for (int i = 2; i < 12; i++) {
if (days < kDaysInMonths[i]) {
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 87c87bfda6..40ab1d25d6 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -747,6 +747,12 @@ function CheckDateCacheCurrent() {
}
+function CreateDate(time) {
+ var date = new $Date();
+ date.setTime(time);
+ return date;
+}
+
// -------------------------------------------------------------------
function SetUpDate() {
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index cdcb0a759f..324d96f333 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -716,7 +716,7 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
debug_info_ = Handle<DebugInfo>::cast(global_handles->Create(debug_info));
typedef PhantomCallbackData<void>::Callback Callback;
GlobalHandles::MakePhantom(
- reinterpret_cast<Object**>(debug_info_.location()), this,
+ reinterpret_cast<Object**>(debug_info_.location()), this, 0,
reinterpret_cast<Callback>(Debug::HandlePhantomDebugInfo));
}
@@ -744,8 +744,8 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
// Compile the script.
Handle<SharedFunctionInfo> function_info;
function_info = Compiler::CompileScript(
- source_code, script_name, 0, 0, false, context, NULL, NULL,
- ScriptCompiler::kNoCompileOptions, NATIVES_CODE);
+ source_code, script_name, 0, 0, false, false, context, NULL, NULL,
+ ScriptCompiler::kNoCompileOptions, NATIVES_CODE, false);
// Silently ignore stack overflows during compilation.
if (function_info.is_null()) {
@@ -2351,7 +2351,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Handle the jump to continue execution after break point depending on the
// break location.
if (at_js_return) {
- // If the break point as return is still active jump to the corresponding
+ // If the break point at return is still active jump to the corresponding
// place in the original code. If not the break point was removed during
// break point processing.
if (break_at_js_return_active) {
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 748f95eff7..665279a0ad 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -110,7 +110,7 @@ size_t Deoptimizer::GetMaxDeoptTableSize() {
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
Deoptimizer* result = isolate->deoptimizer_data()->current_;
- CHECK_NE(result, NULL);
+ CHECK_NOT_NULL(result);
result->DeleteFrameDescriptions();
isolate->deoptimizer_data()->current_ = NULL;
return result;
@@ -367,7 +367,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
- Zone zone(isolate);
+ Zone zone;
ZoneList<Code*> codes(10, &zone);
// Walk over all optimized code objects in this native context.
@@ -592,7 +592,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
}
}
compiled_code_ = FindOptimizedCode(function, optimized_code);
-
#if DEBUG
DCHECK(compiled_code_ != NULL);
if (type == EAGER || type == SOFT || type == LAZY) {
@@ -609,6 +608,10 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
+ if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
+ PROFILE(isolate_, CodeDeoptEvent(compiled_code_, bailout_id_, from_,
+ fp_to_sp_delta_));
+ }
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
input_->SetFrameType(frame_type);
@@ -747,11 +750,6 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
- // Print some helpful diagnostic information.
- if (FLAG_log_timer_events &&
- compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
- LOG(isolate(), CodeDeoptEvent(compiled_code_));
- }
base::ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
@@ -901,7 +899,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
CHECK(frame_index >= 0 && frame_index < output_count_);
- CHECK_EQ(output_[frame_index], NULL);
+ CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
// The top address for the bottommost output frame can be computed from
@@ -929,7 +927,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
+ int parameter_count =
+ function->shared()->internal_formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
@@ -1060,7 +1059,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
- CHECK_EQ(0, output_offset);
+ CHECK_EQ(0u, output_offset);
// Compute this frame's PC, state, and continuation.
Code* non_optimized_code = function->shared()->code();
@@ -1382,7 +1381,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
top_address + output_offset, output_offset, value);
}
- CHECK_EQ(0, output_offset);
+ CHECK_EQ(0u, output_offset);
intptr_t pc = reinterpret_cast<intptr_t>(
construct_stub->instruction_start() +
@@ -1429,7 +1428,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
// A frame for an accessor stub can not be the topmost or bottommost one.
CHECK(frame_index > 0 && frame_index < output_count_ - 1);
- CHECK_EQ(output_[frame_index], NULL);
+ CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
@@ -1522,7 +1521,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
DoTranslateCommand(iterator, frame_index, output_offset);
}
- CHECK_EQ(output_offset, 0);
+ CHECK_EQ(0u, output_offset);
Smi* offset = is_setter_stub_frame ?
isolate_->heap()->setter_stub_deopt_pc_offset() :
@@ -1735,7 +1734,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
}
- CHECK_EQ(output_frame_offset, 0);
+ CHECK_EQ(0u, output_frame_offset);
if (!arg_count_known) {
CHECK_GE(arguments_length_offset, 0);
@@ -2726,7 +2725,8 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
CHECK_EQ(Smi::cast(function), Smi::FromInt(StackFrame::STUB));
return 0;
}
- unsigned arguments = function->shared()->formal_parameter_count() + 1;
+ unsigned arguments =
+ function->shared()->internal_formal_parameter_count() + 1;
return arguments * kPointerSize;
}
@@ -2863,7 +2863,7 @@ unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
int FrameDescription::ComputeParametersCount() {
switch (type_) {
case StackFrame::JAVA_SCRIPT:
- return function_->shared()->formal_parameter_count();
+ return function_->shared()->internal_formal_parameter_count();
case StackFrame::ARGUMENTS_ADAPTOR: {
// Last slot contains number of incomming arguments as a smi.
// Can't use GetExpression(0) because it would cause infinite recursion.
@@ -3190,7 +3190,10 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
int inlined_jsframe_index,
int formal_parameter_count)
- : current_slot_(0), args_length_(-1), first_slot_index_(-1) {
+ : current_slot_(0),
+ args_length_(-1),
+ first_slot_index_(-1),
+ should_deoptimize_(false) {
DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
@@ -3208,7 +3211,6 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
CHECK_GT(jsframe_count, inlined_jsframe_index);
int jsframes_to_skip = inlined_jsframe_index;
int number_of_slots = -1; // Number of slots inside our frame (yet unknown)
- bool should_deopt = false;
while (number_of_slots != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
bool processed = false;
@@ -3265,7 +3267,7 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
number_of_slots += slot.GetChildrenCount();
if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
- should_deopt = true;
+ should_deoptimize_ = true;
}
}
@@ -3276,7 +3278,7 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
it.Skip(Translation::NumberOfOperandsFor(opcode));
}
}
- if (should_deopt) {
+ if (should_deoptimize_) {
List<JSFunction*> functions(2);
frame->GetFunctions(&functions);
Deoptimizer::DeoptimizeFunction(functions[0]);
@@ -3286,8 +3288,13 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
Handle<Object> SlotRef::GetValue(Isolate* isolate) {
switch (representation_) {
- case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_), isolate);
+ case TAGGED: {
+ Handle<Object> value(Memory::Object_at(addr_), isolate);
+ if (value->IsMutableHeapNumber()) {
+ HeapNumber::cast(*value)->set_map(isolate->heap()->heap_number_map());
+ }
+ return value;
+ }
case INT32: {
#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
@@ -3388,9 +3395,9 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
case SlotRef::INT32:
case SlotRef::UINT32:
case SlotRef::DOUBLE:
- case SlotRef::LITERAL: {
+ case SlotRef::LITERAL:
return slot.GetValue(isolate);
- }
+
case SlotRef::ARGUMENTS_OBJECT: {
// We should never need to materialize an arguments object,
// but we still need to put something into the array
@@ -3492,9 +3499,11 @@ void SlotRefValueBuilder::Finish(Isolate* isolate) {
// We should have processed all the slots
CHECK_EQ(slot_refs_.length(), current_slot_);
- if (materialized_objects_.length() > prev_materialized_count_) {
- // We have materialized some new objects, so we have to store them
- // to prevent duplicate materialization
+ if (should_deoptimize_ &&
+ materialized_objects_.length() > prev_materialized_count_) {
+ // We have materialized some new objects and they might be accessible
+ // from the arguments object, so we have to store them
+ // to prevent duplicate materialization.
Handle<FixedArray> array = isolate->factory()->NewFixedArray(
materialized_objects_.length());
for (int i = 0; i < materialized_objects_.length(); i++) {
@@ -3628,4 +3637,45 @@ void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}
+
+const char* Deoptimizer::GetDeoptReason(DeoptReason deopt_reason) {
+ DCHECK(deopt_reason < kLastDeoptReason);
+#define DEOPT_MESSAGES_TEXTS(C, T) T,
+ static const char* deopt_messages_[] = {
+ DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_TEXTS)};
+#undef DEOPT_MESSAGES_TEXTS
+ return deopt_messages_[deopt_reason];
+}
+
+
+Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, int bailout_id) {
+ int last_position = 0;
+ Isolate* isolate = code->GetIsolate();
+ Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
+ RelocInfo::ModeMask(RelocInfo::POSITION) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator it(code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->rmode() == RelocInfo::POSITION) {
+ last_position = static_cast<int>(info->data());
+ } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
+ last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
+ } else if (last_reason != Deoptimizer::kNoReason) {
+ if ((bailout_id ==
+ Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
+ Deoptimizer::EAGER)) ||
+ (bailout_id ==
+ Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
+ Deoptimizer::SOFT)) ||
+ (bailout_id ==
+ Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
+ Deoptimizer::LAZY))) {
+ CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
+ return DeoptInfo(last_position, NULL, last_reason);
+ }
+ }
+ }
+ return DeoptInfo(0, NULL, Deoptimizer::kNoReason);
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 612d5f6ecf..471a05d9b0 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -9,7 +9,6 @@
#include "src/allocation.h"
#include "src/macro-assembler.h"
-#include "src/zone-inl.h"
namespace v8 {
@@ -88,6 +87,83 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
+#define DEOPT_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ V(kConstantGlobalVariableAssignment, "Constant global variable assignment") \
+ V(kConversionOverflow, "conversion overflow") \
+ V(kDivisionByZero, "division by zero") \
+ V(kElementsKindUnhandledInKeyedLoadGenericStub, \
+ "ElementsKind unhandled in KeyedLoadGenericStub") \
+ V(kExpectedHeapNumber, "Expected heap number") \
+ V(kExpectedSmi, "Expected smi") \
+ V(kForcedDeoptToRuntime, "Forced deopt to runtime") \
+ V(kHole, "hole") \
+ V(kHoleyArrayDespitePackedElements_kindFeedback, \
+ "Holey array despite packed elements_kind feedback") \
+ V(kInstanceMigrationFailed, "instance migration failed") \
+ V(kInsufficientTypeFeedbackForCallWithArguments, \
+ "Insufficient type feedback for call with arguments") \
+ V(kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
+ "Insufficient type feedback for combined type of binary operation") \
+ V(kInsufficientTypeFeedbackForGenericNamedAccess, \
+ "Insufficient type feedback for generic named access") \
+ V(kInsufficientTypeFeedbackForKeyedLoad, \
+ "Insufficient type feedback for keyed load") \
+ V(kInsufficientTypeFeedbackForKeyedStore, \
+ "Insufficient type feedback for keyed store") \
+ V(kInsufficientTypeFeedbackForLHSOfBinaryOperation, \
+ "Insufficient type feedback for LHS of binary operation") \
+ V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
+ "Insufficient type feedback for RHS of binary operation") \
+ V(kKeyIsNegative, "key is negative") \
+ V(kLostPrecision, "lost precision") \
+ V(kLostPrecisionOrNaN, "lost precision or NaN") \
+ V(kMementoFound, "memento found") \
+ V(kMinusZero, "minus zero") \
+ V(kNaN, "NaN") \
+ V(kNegativeKeyEncountered, "Negative key encountered") \
+ V(kNegativeValue, "negative value") \
+ V(kNoCache, "no cache") \
+ V(kNonStrictElementsInKeyedLoadGenericStub, \
+ "non-strict elements in KeyedLoadGenericStub") \
+ V(kNotADateObject, "not a date object") \
+ V(kNotAHeapNumber, "not a heap number") \
+ V(kNotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
+ V(kNotAHeapNumberUndefined, "not a heap number/undefined") \
+ V(kNotAJavaScriptObject, "not a JavaScript object") \
+ V(kNotASmi, "not a Smi") \
+ V(kNotHeapNumber, "not heap number") \
+ V(kNull, "null") \
+ V(kOutOfBounds, "out of bounds") \
+ V(kOutsideOfRange, "Outside of range") \
+ V(kOverflow, "overflow") \
+ V(kReceiverWasAGlobalObject, "receiver was a global object") \
+ V(kSmi, "Smi") \
+ V(kTooManyArguments, "too many arguments") \
+ V(kTooManyUndetectableTypes, "Too many undetectable types") \
+ V(kTracingElementsTransitions, "Tracing elements transitions") \
+ V(kTypeMismatchBetweenFeedbackAndConstant, \
+ "Type mismatch between feedback and constant") \
+ V(kUndefined, "undefined") \
+ V(kUnexpectedCellContentsInConstantGlobalStore, \
+ "Unexpected cell contents in constant global store") \
+ V(kUnexpectedCellContentsInGlobalStore, \
+ "Unexpected cell contents in global store") \
+ V(kUnexpectedObject, "unexpected object") \
+ V(kUnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
+ V(kUninitializedBoilerplateInFastClone, \
+ "Uninitialized boilerplate in fast clone") \
+ V(kUninitializedBoilerplateLiterals, "Uninitialized boilerplate literals") \
+ V(kUnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
+ V(kUnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
+ V(kUnknownMapInPolymorphicElementAccess, \
+ "Unknown map in polymorphic element access") \
+ V(kUnknownMap, "Unknown map") \
+ V(kValueMismatch, "value mismatch") \
+ V(kWrongInstanceType, "wrong instance type") \
+ V(kWrongMap, "wrong map")
+
+
class Deoptimizer : public Malloced {
public:
enum BailoutType {
@@ -96,46 +172,45 @@ class Deoptimizer : public Malloced {
SOFT,
// This last bailout type is not really a bailout, but used by the
// debugger to deoptimize stack frames to allow inspection.
- DEBUGGER
+ DEBUGGER,
+ kBailoutTypesWithCodeEntry = SOFT + 1
};
- static const int kBailoutTypesWithCodeEntry = SOFT + 1;
-
- struct Reason {
- Reason(int r, const char* m, const char* d)
- : raw_position(r), mnemonic(m), detail(d) {}
-
- bool operator==(const Reason& other) const {
- return raw_position == other.raw_position &&
- CStringEquals(mnemonic, other.mnemonic) &&
- CStringEquals(detail, other.detail);
- }
+#define DEOPT_MESSAGES_CONSTANTS(C, T) C,
+ enum DeoptReason {
+ DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_CONSTANTS) kLastDeoptReason
+ };
+#undef DEOPT_MESSAGES_CONSTANTS
+ static const char* GetDeoptReason(DeoptReason deopt_reason);
- bool operator!=(const Reason& other) const { return !(*this == other); }
+ struct DeoptInfo {
+ DeoptInfo(int r, const char* m, DeoptReason d)
+ : raw_position(r), mnemonic(m), deopt_reason(d) {}
int raw_position;
const char* mnemonic;
- const char* detail;
+ DeoptReason deopt_reason;
};
+ static DeoptInfo GetDeoptInfo(Code* code, int bailout_id);
+
struct JumpTableEntry : public ZoneObject {
- inline JumpTableEntry(Address entry, const Reason& the_reason,
+ inline JumpTableEntry(Address entry, const DeoptInfo& deopt_info,
Deoptimizer::BailoutType type, bool frame)
: label(),
address(entry),
- reason(the_reason),
+ deopt_info(deopt_info),
bailout_type(type),
needs_frame(frame) {}
bool IsEquivalentTo(const JumpTableEntry& other) const {
return address == other.address && bailout_type == other.bailout_type &&
- needs_frame == other.needs_frame &&
- (!FLAG_trace_deopt || reason == other.reason);
+ needs_frame == other.needs_frame;
}
Label label;
Address address;
- Reason reason;
+ DeoptInfo deopt_info;
Deoptimizer::BailoutType bailout_type;
bool needs_frame;
};
@@ -864,6 +939,7 @@ class SlotRefValueBuilder BASE_EMBEDDED {
int current_slot_;
int args_length_;
int first_slot_index_;
+ bool should_deoptimize_;
static SlotRef ComputeSlotForNextArgument(
Translation::Opcode opcode,
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index bedff451e9..e0316441af 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -85,11 +85,14 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
} else {
// No relocation information when printing code stubs.
}
+#if !V8_TARGET_ARCH_PPC
int constants = -1; // no constants being decoded at the start
+#endif
while (pc < end) {
// First decode instruction so that we know its length.
byte* prev_pc = pc;
+#if !V8_TARGET_ARCH_PPC
if (constants > 0) {
SNPrintF(decode_buffer,
"%08x constant",
@@ -112,12 +115,31 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
"%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
reinterpret_cast<intptr_t>(ptr),
ptr - begin);
- pc += 4;
+ pc += sizeof(ptr);
} else {
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, pc);
}
}
+#else // !V8_TARGET_ARCH_PPC
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+ // Function descriptors are specially decoded and skipped.
+ // Other internal references (load of ool constant pool pointer)
+ // are not since they are a encoded as a regular mov sequence.
+ int skip;
+ if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
+ it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE &&
+ (skip = Assembler::DecodeInternalReference(decode_buffer, pc))) {
+ pc += skip;
+ } else {
+ decode_buffer[0] = '\0';
+ pc += d.InstructionDecode(decode_buffer, pc);
+ }
+#else
+ decode_buffer[0] = '\0';
+ pc += d.InstructionDecode(decode_buffer, pc);
+#endif // ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+#endif // !V8_TARGET_ARCH_PPC
// Collect RelocInfo for this instruction (prev_pc .. pc-1)
List<const char*> comments(4);
@@ -173,6 +195,11 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
} else {
out.AddFormatted(" ;; debug: position %d", relocinfo.data());
}
+ } else if (rmode == RelocInfo::DEOPT_REASON) {
+ Deoptimizer::DeoptReason reason =
+ static_cast<Deoptimizer::DeoptReason>(relocinfo.data());
+ out.AddFormatted(" ;; debug: deopt reason '%s'",
+ Deoptimizer::GetDeoptReason(reason));
} else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 4e9a0527c3..cd7ba984af 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -587,19 +587,15 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateImpl(holder);
}
- static bool HasElementImpl(Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key,
+ static bool HasElementImpl(Handle<JSObject> holder, uint32_t key,
Handle<FixedArrayBase> backing_store) {
- return ElementsAccessorSubclass::GetAttributesImpl(
- receiver, holder, key, backing_store) != ABSENT;
+ return ElementsAccessorSubclass::GetAttributesImpl(holder, key,
+ backing_store) != ABSENT;
}
- virtual bool HasElement(Handle<Object> receiver, Handle<JSObject> holder,
- uint32_t key,
+ virtual bool HasElement(Handle<JSObject> holder, uint32_t key,
Handle<FixedArrayBase> backing_store) FINAL {
- return ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, backing_store);
+ return ElementsAccessorSubclass::HasElementImpl(holder, key, backing_store);
}
MUST_USE_RESULT virtual MaybeHandle<Object> Get(
@@ -632,14 +628,13 @@ class ElementsAccessorBase : public ElementsAccessor {
}
MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Handle<Object> receiver, Handle<JSObject> holder, uint32_t key,
+ Handle<JSObject> holder, uint32_t key,
Handle<FixedArrayBase> backing_store) FINAL {
- return ElementsAccessorSubclass::GetAttributesImpl(
- receiver, holder, key, backing_store);
+ return ElementsAccessorSubclass::GetAttributesImpl(holder, key,
+ backing_store);
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<Object> receiver,
Handle<JSObject> obj,
uint32_t key,
Handle<FixedArrayBase> backing_store) {
@@ -652,14 +647,13 @@ class ElementsAccessorBase : public ElementsAccessor {
}
MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
- Handle<Object> receiver, Handle<JSObject> holder, uint32_t key,
+ Handle<JSObject> holder, uint32_t key,
Handle<FixedArrayBase> backing_store) FINAL {
- return ElementsAccessorSubclass::GetAccessorPairImpl(
- receiver, holder, key, backing_store);
+ return ElementsAccessorSubclass::GetAccessorPairImpl(holder, key,
+ backing_store);
}
MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
- Handle<Object> receiver,
Handle<JSObject> obj,
uint32_t key,
Handle<FixedArrayBase> backing_store) {
@@ -691,9 +685,8 @@ class ElementsAccessorBase : public ElementsAccessor {
}
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) OVERRIDE = 0;
+ Handle<JSObject> obj, uint32_t key,
+ LanguageMode language_mode) OVERRIDE = 0;
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
@@ -767,8 +760,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
- if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, from)) {
+ if (ElementsAccessorSubclass::HasElementImpl(holder, key, from)) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value,
@@ -805,8 +797,7 @@ class ElementsAccessorBase : public ElementsAccessor {
for (uint32_t y = 0; y < len1; y++) {
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
- if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, from)) {
+ if (ElementsAccessorSubclass::HasElementImpl(holder, key, from)) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value,
@@ -915,9 +906,8 @@ class FastElementsAccessor
return length_object;
}
- static Handle<Object> DeleteCommon(Handle<JSObject> obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ static Handle<Object> DeleteCommon(Handle<JSObject> obj, uint32_t key,
+ LanguageMode language_mode) {
DCHECK(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
@@ -975,12 +965,11 @@ class FastElementsAccessor
}
virtual MaybeHandle<Object> Delete(Handle<JSObject> obj, uint32_t key,
- JSReceiver::DeleteMode mode) FINAL {
- return DeleteCommon(obj, key, mode);
+ LanguageMode language_mode) FINAL {
+ return DeleteCommon(obj, key, language_mode);
}
static bool HasElementImpl(
- Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
Handle<FixedArrayBase> backing_store) {
@@ -1279,7 +1268,6 @@ class TypedElementsAccessor
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<Object> receiver,
Handle<JSObject> obj,
uint32_t key,
Handle<FixedArrayBase> backing_store) {
@@ -1298,14 +1286,12 @@ class TypedElementsAccessor
}
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) FINAL {
+ Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) FINAL {
// External arrays always ignore deletes.
return obj->GetIsolate()->factory()->true_value();
}
- static bool HasElementImpl(Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key,
+ static bool HasElementImpl(Handle<JSObject> holder, uint32_t key,
Handle<FixedArrayBase> backing_store) {
uint32_t capacity =
AccessorClass::GetCapacityImpl(backing_store);
@@ -1397,9 +1383,7 @@ class DictionaryElementsAccessor
}
MUST_USE_RESULT static MaybeHandle<Object> DeleteCommon(
- Handle<JSObject> obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) {
Isolate* isolate = obj->GetIsolate();
Handle<FixedArray> backing_store(FixedArray::cast(obj->elements()),
isolate);
@@ -1413,9 +1397,9 @@ class DictionaryElementsAccessor
int entry = dictionary->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Handle<Object> result =
- SeededNumberDictionary::DeleteProperty(dictionary, entry, mode);
+ SeededNumberDictionary::DeleteProperty(dictionary, entry);
if (*result == *isolate->factory()->false_value()) {
- if (mode == JSObject::STRICT_DELETION) {
+ if (is_strict(language_mode)) {
// Deleting a non-configurable property in strict mode.
Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
Handle<Object> args[2] = { name, obj };
@@ -1450,8 +1434,8 @@ class DictionaryElementsAccessor
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) FINAL {
- return DeleteCommon(obj, key, mode);
+ Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) FINAL {
+ return DeleteCommon(obj, key, language_mode);
}
MUST_USE_RESULT static MaybeHandle<Object> GetImpl(
@@ -1466,7 +1450,7 @@ class DictionaryElementsAccessor
if (entry != SeededNumberDictionary::kNotFound) {
Handle<Object> element(backing_store->ValueAt(entry), isolate);
PropertyDetails details = backing_store->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
+ if (details.type() == ACCESSOR_CONSTANT) {
return JSObject::GetElementWithCallback(
obj, receiver, element, key, obj);
} else {
@@ -1477,7 +1461,6 @@ class DictionaryElementsAccessor
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<Object> receiver,
Handle<JSObject> obj,
uint32_t key,
Handle<FixedArrayBase> backing_store) {
@@ -1491,7 +1474,6 @@ class DictionaryElementsAccessor
}
MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
- Handle<Object> receiver,
Handle<JSObject> obj,
uint32_t key,
Handle<FixedArrayBase> store) {
@@ -1499,16 +1481,14 @@ class DictionaryElementsAccessor
Handle<SeededNumberDictionary>::cast(store);
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound &&
- backing_store->DetailsAt(entry).type() == CALLBACKS &&
+ backing_store->DetailsAt(entry).type() == ACCESSOR_CONSTANT &&
backing_store->ValueAt(entry)->IsAccessorPair()) {
return handle(AccessorPair::cast(backing_store->ValueAt(entry)));
}
return MaybeHandle<AccessorPair>();
}
- static bool HasElementImpl(Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key,
+ static bool HasElementImpl(Handle<JSObject> holder, uint32_t key,
Handle<FixedArrayBase> store) {
Handle<SeededNumberDictionary> backing_store =
Handle<SeededNumberDictionary>::cast(store);
@@ -1578,7 +1558,6 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Handle<Object> receiver,
Handle<JSObject> obj,
uint32_t key,
Handle<FixedArrayBase> backing_store) {
@@ -1589,13 +1568,12 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
} else {
// If not aliased, check the arguments.
Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
- return ElementsAccessor::ForArray(arguments)->GetAttributes(
- receiver, obj, key, arguments);
+ return ElementsAccessor::ForArray(arguments)
+ ->GetAttributes(obj, key, arguments);
}
}
MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
- Handle<Object> receiver,
Handle<JSObject> obj,
uint32_t key,
Handle<FixedArrayBase> parameters) {
@@ -1606,8 +1584,8 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
} else {
// If not aliased, check the arguments.
Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
- return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
- receiver, obj, key, arguments);
+ return ElementsAccessor::ForArray(arguments)
+ ->GetAccessorPair(obj, key, arguments);
}
}
@@ -1622,7 +1600,7 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
}
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) FINAL {
+ Handle<JSObject> obj, uint32_t key, LanguageMode language_mode) FINAL {
Isolate* isolate = obj->GetIsolate();
Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
@@ -1634,12 +1612,14 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
} else {
Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
if (arguments->IsDictionary()) {
- return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
+ return DictionaryElementsAccessor::DeleteCommon(obj, key,
+ language_mode);
} else {
// It's difficult to access the version of DeleteCommon that is declared
// in the templatized super class, call the concrete implementation in
// the class for the most generalized ElementsKind subclass.
- return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
+ return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key,
+ language_mode);
}
}
return isolate->factory()->true_value();
@@ -1665,28 +1645,6 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
return index;
}
- static bool HasElementImpl(Handle<Object> receiver,
- Handle<JSObject> holder,
- uint32_t key,
- Handle<FixedArrayBase> parameters) {
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
- Handle<Object> probe = GetParameterMapArg(holder, parameter_map, key);
- if (!probe->IsTheHole()) {
- return true;
- } else {
- Isolate* isolate = holder->GetIsolate();
- Handle<FixedArrayBase> arguments(FixedArrayBase::cast(
- Handle<FixedArray>::cast(parameter_map)->get(1)), isolate);
- ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- accessor->Get(receiver, holder, key, arguments),
- false);
- return !value->IsTheHole();
- }
- }
-
private:
static Handle<Object> GetParameterMapArg(Handle<JSObject> holder,
Handle<FixedArray> parameter_map,
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 05354eaccf..1d80b25891 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -33,16 +33,14 @@ class ElementsAccessor {
// the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
// holder->elements() is used as the backing store.
virtual bool HasElement(
- Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
Handle<FixedArrayBase> backing_store) = 0;
inline bool HasElement(
- Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key) {
- return HasElement(receiver, holder, key, handle(holder->elements()));
+ return HasElement(holder, key, handle(holder->elements()));
}
// Returns the element with the specified key or undefined if there is no such
@@ -69,16 +67,14 @@ class ElementsAccessor {
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
Handle<FixedArrayBase> backing_store) = 0;
MUST_USE_RESULT inline PropertyAttributes GetAttributes(
- Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key) {
- return GetAttributes(receiver, holder, key, handle(holder->elements()));
+ return GetAttributes(holder, key, handle(holder->elements()));
}
// Returns an element's accessors, or NULL if the element does not exist or
@@ -87,16 +83,14 @@ class ElementsAccessor {
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
- Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key,
Handle<FixedArrayBase> backing_store) = 0;
MUST_USE_RESULT inline MaybeHandle<AccessorPair> GetAccessorPair(
- Handle<Object> receiver,
Handle<JSObject> holder,
uint32_t key) {
- return GetAccessorPair(receiver, holder, key, handle(holder->elements()));
+ return GetAccessorPair(holder, key, handle(holder->elements()));
}
// Modifies the length data property as specified for JSArrays and resizes the
@@ -121,9 +115,7 @@ class ElementsAccessor {
// Deletes an element in an object, returning a new elements backing store.
MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
- Handle<JSObject> holder,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
+ Handle<JSObject> holder, uint32_t key, LanguageMode language_mode) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index a85effd403..9dfef372bc 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -37,10 +37,15 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
static void PrintDeserializedCodeInfo(Handle<JSFunction> function) {
if (function->code() == function->shared()->code() &&
function->shared()->deserialized()) {
- PrintF("Running deserialized script ");
+ PrintF("[Running deserialized script");
Object* script = function->shared()->script();
- if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
- PrintF("\n");
+ if (script->IsScript()) {
+ Object* name = Script::cast(script)->name();
+ if (name->IsString()) {
+ PrintF(": %s", String::cast(name)->ToCString().get());
+ }
+ }
+ PrintF("]\n");
}
}
@@ -53,6 +58,26 @@ MUST_USE_RESULT static MaybeHandle<Object> Invoke(
Handle<Object> args[]) {
Isolate* isolate = function->GetIsolate();
+ // api callbacks can be called directly.
+ if (!is_construct && function->shared()->IsApiFunction()) {
+ SaveContext save(isolate);
+ isolate->set_context(function->context());
+ if (receiver->IsGlobalObject()) {
+ receiver = handle(Handle<GlobalObject>::cast(receiver)->global_proxy());
+ }
+ DCHECK(function->context()->global_object()->IsGlobalObject());
+ auto value = Builtins::InvokeApiFunction(function, receiver, argc, args);
+ bool has_exception = value.is_null();
+ DCHECK(has_exception == isolate->has_pending_exception());
+ if (has_exception) {
+ isolate->ReportPendingMessages();
+ return MaybeHandle<Object>();
+ } else {
+ isolate->clear_pending_message();
+ }
+ return value;
+ }
+
// Entering JavaScript.
VMState<JS> state(isolate);
CHECK(AllowJavascriptExecution::IsAllowed(isolate));
@@ -104,7 +129,9 @@ MUST_USE_RESULT static MaybeHandle<Object> Invoke(
}
#ifdef VERIFY_HEAP
- value->ObjectVerify();
+ if (FLAG_verify_heap) {
+ value->ObjectVerify();
+ }
#endif
// Update the pending exception flag and return the value.
@@ -139,8 +166,7 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate,
// In sloppy mode, convert receiver.
if (convert_receiver && !receiver->IsJSReceiver() &&
- !func->shared()->native() &&
- func->shared()->strict_mode() == SLOPPY) {
+ !func->shared()->native() && is_sloppy(func->shared()->language_mode())) {
if (receiver->IsUndefined() || receiver->IsNull()) {
receiver = handle(func->global_proxy());
DCHECK(!receiver->IsJSBuiltinsObject());
@@ -603,73 +629,6 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
}
-MaybeHandle<JSFunction> Execution::InstantiateFunction(
- Handle<FunctionTemplateInfo> data) {
- Isolate* isolate = data->GetIsolate();
- if (!data->do_not_cache()) {
- // Fast case: see if the function has already been instantiated
- int serial_number = Smi::cast(data->serial_number())->value();
- Handle<JSObject> cache(isolate->native_context()->function_cache());
- Handle<Object> elm =
- Object::GetElement(isolate, cache, serial_number).ToHandleChecked();
- if (elm->IsJSFunction()) return Handle<JSFunction>::cast(elm);
- }
- // The function has not yet been instantiated in this context; do it.
- Handle<Object> args[] = { data };
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Call(isolate,
- isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- arraysize(args),
- args),
- JSFunction);
- return Handle<JSFunction>::cast(result);
-}
-
-
-MaybeHandle<JSObject> Execution::InstantiateObject(
- Handle<ObjectTemplateInfo> data) {
- Isolate* isolate = data->GetIsolate();
- Handle<Object> result;
- if (data->property_list()->IsUndefined() &&
- !data->constructor()->IsUndefined()) {
- Handle<FunctionTemplateInfo> cons_template =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(data->constructor()));
- Handle<JSFunction> cons;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, cons, InstantiateFunction(cons_template), JSObject);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result, New(cons, 0, NULL), JSObject);
- } else {
- Handle<Object> args[] = { data };
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Call(isolate,
- isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- arraysize(args),
- args),
- JSObject);
- }
- return Handle<JSObject>::cast(result);
-}
-
-
-MaybeHandle<Object> Execution::ConfigureInstance(
- Isolate* isolate,
- Handle<Object> instance,
- Handle<Object> instance_template) {
- Handle<Object> args[] = { instance, instance_template };
- return Execution::Call(isolate,
- isolate->configure_instance_fun(),
- isolate->js_builtins_object(),
- arraysize(args),
- args);
-}
-
-
Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
@@ -692,7 +651,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Object* StackGuard::HandleInterrupts() {
if (CheckAndClearInterrupt(GC_REQUEST)) {
- isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
+ isolate_->heap()->HandleGCRequest();
}
if (CheckDebugBreak() || CheckDebugCommand()) {
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index ae263bddd7..47cbb08f03 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -98,12 +98,6 @@ class Execution FINAL : public AllStatic {
static Handle<Object> CharAt(Handle<String> str, uint32_t index);
static Handle<Object> GetFunctionFor();
- MUST_USE_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
- Handle<FunctionTemplateInfo> data);
- MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
- Handle<ObjectTemplateInfo> data);
- MUST_USE_RESULT static MaybeHandle<Object> ConfigureInstance(
- Isolate* isolate, Handle<Object> instance, Handle<Object> data);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index bb5ee333f6..2c1f91d399 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -62,86 +62,81 @@ void StatisticsExtension::GetCounters(
Counters* counters = isolate->counters();
v8::Local<v8::Object> result = v8::Object::New(args.GetIsolate());
-#define ADD_COUNTER(name, caption) \
- AddCounter(args.GetIsolate(), result, counters->name(), #name);
-
- STATS_COUNTER_LIST_1(ADD_COUNTER)
- STATS_COUNTER_LIST_2(ADD_COUNTER)
+ struct StatisticsCounter {
+ v8::internal::StatsCounter* counter;
+ const char* name;
+ };
+ const StatisticsCounter counter_list[] = {
+#define ADD_COUNTER(name, caption) \
+ { counters->name(), #name } \
+ ,
+
+ STATS_COUNTER_LIST_1(ADD_COUNTER) STATS_COUNTER_LIST_2(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(args.GetIsolate(), result, counters->count_of_##name(), \
- "count_of_" #name); \
- AddCounter(args.GetIsolate(), result, counters->size_of_##name(), \
- "size_of_" #name);
+#define ADD_COUNTER(name) \
+ { counters->count_of_##name(), "count_of_" #name } \
+ , {counters->size_of_##name(), "size_of_" #name},
- INSTANCE_TYPE_LIST(ADD_COUNTER)
+ INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(args.GetIsolate(), result, counters->count_of_CODE_TYPE_##name(), \
- "count_of_CODE_TYPE_" #name); \
- AddCounter(args.GetIsolate(), result, counters->size_of_CODE_TYPE_##name(), \
- "size_of_CODE_TYPE_" #name);
+#define ADD_COUNTER(name) \
+ { counters->count_of_CODE_TYPE_##name(), "count_of_CODE_TYPE_" #name } \
+ , {counters->size_of_CODE_TYPE_##name(), "size_of_CODE_TYPE_" #name},
- CODE_KIND_LIST(ADD_COUNTER)
+ CODE_KIND_LIST(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(args.GetIsolate(), result, \
- counters->count_of_FIXED_ARRAY_##name(), \
- "count_of_FIXED_ARRAY_" #name); \
- AddCounter(args.GetIsolate(), result, \
- counters->size_of_FIXED_ARRAY_##name(), \
- "size_of_FIXED_ARRAY_" #name);
-
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
+#define ADD_COUNTER(name) \
+ { counters->count_of_FIXED_ARRAY_##name(), "count_of_FIXED_ARRAY_" #name } \
+ , {counters->size_of_FIXED_ARRAY_##name(), "size_of_FIXED_ARRAY_" #name},
+
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
+ }; // End counter_list array.
+
+ for (size_t i = 0; i < arraysize(counter_list); i++) {
+ AddCounter(args.GetIsolate(), result, counter_list[i].counter,
+ counter_list[i].name);
+ }
+
+ struct StatisticNumber {
+ intptr_t number;
+ const char* name;
+ };
+
+ const StatisticNumber numbers[] = {
+ {isolate->memory_allocator()->Size(), "total_committed_bytes"},
+ {heap->new_space()->Size(), "new_space_live_bytes"},
+ {heap->new_space()->Available(), "new_space_available_bytes"},
+ {heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
+ {heap->old_pointer_space()->Size(), "old_pointer_space_live_bytes"},
+ {heap->old_pointer_space()->Available(),
+ "old_pointer_space_available_bytes"},
+ {heap->old_pointer_space()->CommittedMemory(),
+ "old_pointer_space_commited_bytes"},
+ {heap->old_data_space()->Size(), "old_data_space_live_bytes"},
+ {heap->old_data_space()->Available(), "old_data_space_available_bytes"},
+ {heap->old_data_space()->CommittedMemory(),
+ "old_data_space_commited_bytes"},
+ {heap->code_space()->Size(), "code_space_live_bytes"},
+ {heap->code_space()->Available(), "code_space_available_bytes"},
+ {heap->code_space()->CommittedMemory(), "code_space_commited_bytes"},
+ {heap->cell_space()->Size(), "cell_space_live_bytes"},
+ {heap->cell_space()->Available(), "cell_space_available_bytes"},
+ {heap->cell_space()->CommittedMemory(), "cell_space_commited_bytes"},
+ {heap->property_cell_space()->Size(), "property_cell_space_live_bytes"},
+ {heap->property_cell_space()->Available(),
+ "property_cell_space_available_bytes"},
+ {heap->property_cell_space()->CommittedMemory(),
+ "property_cell_space_commited_bytes"},
+ {heap->lo_space()->Size(), "lo_space_live_bytes"},
+ {heap->lo_space()->Available(), "lo_space_available_bytes"},
+ {heap->lo_space()->CommittedMemory(), "lo_space_commited_bytes"},
+ };
+
+ for (size_t i = 0; i < arraysize(numbers); i++) {
+ AddNumber(args.GetIsolate(), result, numbers[i].number, numbers[i].name);
+ }
- AddNumber(args.GetIsolate(), result, isolate->memory_allocator()->Size(),
- "total_committed_bytes");
- AddNumber(args.GetIsolate(), result, heap->new_space()->Size(),
- "new_space_live_bytes");
- AddNumber(args.GetIsolate(), result, heap->new_space()->Available(),
- "new_space_available_bytes");
- AddNumber(args.GetIsolate(), result, heap->new_space()->CommittedMemory(),
- "new_space_commited_bytes");
- AddNumber(args.GetIsolate(), result, heap->old_pointer_space()->Size(),
- "old_pointer_space_live_bytes");
- AddNumber(args.GetIsolate(), result, heap->old_pointer_space()->Available(),
- "old_pointer_space_available_bytes");
- AddNumber(args.GetIsolate(), result,
- heap->old_pointer_space()->CommittedMemory(),
- "old_pointer_space_commited_bytes");
- AddNumber(args.GetIsolate(), result, heap->old_data_space()->Size(),
- "old_data_space_live_bytes");
- AddNumber(args.GetIsolate(), result, heap->old_data_space()->Available(),
- "old_data_space_available_bytes");
- AddNumber(args.GetIsolate(), result,
- heap->old_data_space()->CommittedMemory(),
- "old_data_space_commited_bytes");
- AddNumber(args.GetIsolate(), result, heap->code_space()->Size(),
- "code_space_live_bytes");
- AddNumber(args.GetIsolate(), result, heap->code_space()->Available(),
- "code_space_available_bytes");
- AddNumber(args.GetIsolate(), result, heap->code_space()->CommittedMemory(),
- "code_space_commited_bytes");
- AddNumber(args.GetIsolate(), result, heap->cell_space()->Size(),
- "cell_space_live_bytes");
- AddNumber(args.GetIsolate(), result, heap->cell_space()->Available(),
- "cell_space_available_bytes");
- AddNumber(args.GetIsolate(), result, heap->cell_space()->CommittedMemory(),
- "cell_space_commited_bytes");
- AddNumber(args.GetIsolate(), result, heap->property_cell_space()->Size(),
- "property_cell_space_live_bytes");
- AddNumber(args.GetIsolate(), result, heap->property_cell_space()->Available(),
- "property_cell_space_available_bytes");
- AddNumber(args.GetIsolate(), result,
- heap->property_cell_space()->CommittedMemory(),
- "property_cell_space_commited_bytes");
- AddNumber(args.GetIsolate(), result, heap->lo_space()->Size(),
- "lo_space_live_bytes");
- AddNumber(args.GetIsolate(), result, heap->lo_space()->Available(),
- "lo_space_available_bytes");
- AddNumber(args.GetIsolate(), result, heap->lo_space()->CommittedMemory(),
- "lo_space_commited_bytes");
AddNumber64(args.GetIsolate(), result,
heap->amount_of_external_allocated_memory(),
"amount_of_external_allocated_memory");
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index ba623411ff..a07d656d75 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -240,7 +240,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
decoder(isolate()->unicode_cache()->utf8_decoder());
decoder->Reset(string.start() + non_ascii_start,
length - non_ascii_start);
- int utf16_length = decoder->Utf16Length();
+ int utf16_length = static_cast<int>(decoder->Utf16Length());
DCHECK(utf16_length > 0);
// Allocate string.
Handle<SeqTwoByteString> result;
@@ -816,21 +816,6 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
}
-Handle<DeclaredAccessorDescriptor> Factory::NewDeclaredAccessorDescriptor() {
- return Handle<DeclaredAccessorDescriptor>::cast(
- NewStruct(DECLARED_ACCESSOR_DESCRIPTOR_TYPE));
-}
-
-
-Handle<DeclaredAccessorInfo> Factory::NewDeclaredAccessorInfo() {
- Handle<DeclaredAccessorInfo> info =
- Handle<DeclaredAccessorInfo>::cast(
- NewStruct(DECLARED_ACCESSOR_INFO_TYPE));
- info->set_flag(0); // Must clear the flag, it was initialized as undefined.
- return info;
-}
-
-
Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(
@@ -1273,7 +1258,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
MaybeHandle<Code> code) {
Handle<Context> context(isolate()->native_context());
Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(name, code);
- DCHECK((info->strict_mode() == SLOPPY) &&
+ DCHECK(is_sloppy(info->language_mode()) &&
(map.is_identical_to(isolate()->sloppy_function_map()) ||
map.is_identical_to(
isolate()->sloppy_function_without_prototype_map()) ||
@@ -1376,8 +1361,7 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
static bool ShouldOptimizeNewClosure(Isolate* isolate,
Handle<SharedFunctionInfo> info) {
return isolate->use_crankshaft() && !info->is_toplevel() &&
- info->is_compiled() && info->allows_lazy_compilation() &&
- !info->optimization_disabled() && !isolate->DebuggerHasBreakPoints();
+ info->is_compiled() && info->allows_lazy_compilation();
}
@@ -1385,7 +1369,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info,
Handle<Context> context,
PretenureFlag pretenure) {
- int map_index = Context::FunctionMapIndex(info->strict_mode(), info->kind());
+ int map_index =
+ Context::FunctionMapIndex(info->language_mode(), info->kind());
Handle<Map> map(Map::cast(context->native_context()->get(map_index)));
Handle<JSFunction> result = NewFunction(map, info, context, pretenure);
@@ -1591,8 +1576,9 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
Handle<DescriptorArray> descs(map->instance_descriptors());
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
- DCHECK(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
+ // Only accessors are expected.
+ DCHECK_EQ(ACCESSOR_CONSTANT, details.type());
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
Handle<Name> name(descs->GetKey(i));
Handle<Object> value(descs->GetCallbacksObject(i), isolate());
Handle<PropertyCell> cell = NewPropertyCell(value);
@@ -1973,6 +1959,18 @@ void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
}
+Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy() {
+ // Create an empty shell of a JSGlobalProxy that needs to be reinitialized
+ // via ReinitializeJSGlobalProxy later.
+ Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
+ // Maintain invariant expected from any JSGlobalProxy.
+ map->set_is_access_check_needed(true);
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObjectFromMap(
+ *map, NOT_TENURED, false),
+ JSGlobalProxy);
+}
+
+
void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
@@ -2106,7 +2104,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
- share->set_formal_parameter_count(0);
+ share->set_internal_formal_parameter_count(0);
share->set_expected_nof_properties(0);
share->set_num_literals(0);
share->set_start_position_and_type(0);
@@ -2152,12 +2150,6 @@ void Factory::SetNumberStringCache(Handle<Object> number,
if (number_string_cache()->get(hash * 2) != *undefined_value()) {
int full_size = isolate()->heap()->FullSizeNumberStringCacheLength();
if (number_string_cache()->length() != full_size) {
- // The first time we have a hash collision, we move to the full sized
- // number string cache. The idea is to have a small number string
- // cache in the snapshot to keep boot-time memory usage down.
- // If we expand the number string cache already while creating
- // the snapshot then that didn't work out.
- DCHECK(!isolate()->serializer_enabled());
Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
isolate()->heap()->set_number_string_cache(*new_cache);
return;
@@ -2228,7 +2220,8 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
- bool strict_mode_callee = callee->shared()->strict_mode() == STRICT;
+ bool strict_mode_callee = is_strict(callee->shared()->language_mode()) ||
+ !callee->is_simple_parameter_list();
Handle<Map> map = strict_mode_callee ? isolate()->strict_arguments_map()
: isolate()->sloppy_arguments_map();
@@ -2245,182 +2238,12 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
}
-Handle<JSFunction> Factory::CreateApiFunction(
- Handle<FunctionTemplateInfo> obj,
- Handle<Object> prototype,
- ApiInstanceType instance_type) {
- Handle<Code> code = isolate()->builtins()->HandleApiCall();
- Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi();
-
- Handle<JSFunction> result;
- if (obj->remove_prototype()) {
- result = NewFunctionWithoutPrototype(empty_string(), code);
- } else {
- int internal_field_count = 0;
- if (!obj->instance_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> instance_template =
- Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()));
- internal_field_count =
- Smi::cast(instance_template->internal_field_count())->value();
- }
-
- // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
- // JSObject::GetHeaderSize.
- int instance_size = kPointerSize * internal_field_count;
- InstanceType type;
- switch (instance_type) {
- case JavaScriptObjectType:
- type = JS_OBJECT_TYPE;
- instance_size += JSObject::kHeaderSize;
- break;
- case GlobalObjectType:
- type = JS_GLOBAL_OBJECT_TYPE;
- instance_size += JSGlobalObject::kSize;
- break;
- case GlobalProxyType:
- type = JS_GLOBAL_PROXY_TYPE;
- instance_size += JSGlobalProxy::kSize;
- break;
- default:
- UNREACHABLE();
- type = JS_OBJECT_TYPE; // Keep the compiler happy.
- break;
- }
-
- result = NewFunction(empty_string(), code, prototype, type, instance_size,
- obj->read_only_prototype(), true);
- }
-
- result->shared()->set_length(obj->length());
- Handle<Object> class_name(obj->class_name(), isolate());
- if (class_name->IsString()) {
- result->shared()->set_instance_class_name(*class_name);
- result->shared()->set_name(*class_name);
- }
- result->shared()->set_function_data(*obj);
- result->shared()->set_construct_stub(*construct_stub);
- result->shared()->DontAdaptArguments();
-
- if (obj->remove_prototype()) {
- DCHECK(result->shared()->IsApiFunction());
- DCHECK(!result->has_initial_map());
- DCHECK(!result->has_prototype());
- return result;
- }
-
-#ifdef DEBUG
- LookupIterator it(handle(JSObject::cast(result->prototype())),
- constructor_string(), LookupIterator::OWN_SKIP_INTERCEPTOR);
- MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
- DCHECK(it.IsFound());
- DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
-#endif
-
- // Down from here is only valid for API functions that can be used as a
- // constructor (don't set the "remove prototype" flag).
-
- Handle<Map> map(result->initial_map());
-
- // Mark as undetectable if needed.
- if (obj->undetectable()) {
- map->set_is_undetectable();
- }
-
- // Mark as hidden for the __proto__ accessor if needed.
- if (obj->hidden_prototype()) {
- map->set_is_hidden_prototype();
- }
-
- // Mark as needs_access_check if needed.
- if (obj->needs_access_check()) {
- map->set_is_access_check_needed(true);
- }
-
- // Set interceptor information in the map.
- if (!obj->named_property_handler()->IsUndefined()) {
- map->set_has_named_interceptor();
- }
- if (!obj->indexed_property_handler()->IsUndefined()) {
- map->set_has_indexed_interceptor();
- }
-
- // Set instance call-as-function information in the map.
- if (!obj->instance_call_handler()->IsUndefined()) {
- map->set_has_instance_call_handler();
- }
-
- // Recursively copy parent instance templates' accessors,
- // 'data' may be modified.
- int max_number_of_additional_properties = 0;
- int max_number_of_static_properties = 0;
- FunctionTemplateInfo* info = *obj;
- while (true) {
- if (!info->instance_template()->IsUndefined()) {
- Object* props =
- ObjectTemplateInfo::cast(
- info->instance_template())->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate());
- NeanderArray props_array(props_handle);
- max_number_of_additional_properties += props_array.length();
- }
- }
- if (!info->property_accessors()->IsUndefined()) {
- Object* props = info->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate());
- NeanderArray props_array(props_handle);
- max_number_of_static_properties += props_array.length();
- }
- }
- Object* parent = info->parent_template();
- if (parent->IsUndefined()) break;
- info = FunctionTemplateInfo::cast(parent);
- }
-
- Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
-
- // Use a temporary FixedArray to acculumate static accessors
- int valid_descriptors = 0;
- Handle<FixedArray> array;
- if (max_number_of_static_properties > 0) {
- array = NewFixedArray(max_number_of_static_properties);
- }
-
- while (true) {
- // Install instance descriptors
- if (!obj->instance_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> instance =
- Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()), isolate());
- Handle<Object> props = Handle<Object>(instance->property_accessors(),
- isolate());
- if (!props->IsUndefined()) {
- Map::AppendCallbackDescriptors(map, props);
- }
- }
- // Accumulate static accessors
- if (!obj->property_accessors()->IsUndefined()) {
- Handle<Object> props = Handle<Object>(obj->property_accessors(),
- isolate());
- valid_descriptors =
- AccessorInfo::AppendUnique(props, array, valid_descriptors);
- }
- // Climb parent chain
- Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate());
- if (parent->IsUndefined()) break;
- obj = Handle<FunctionTemplateInfo>::cast(parent);
- }
-
- // Install accumulated static accessors
- for (int i = 0; i < valid_descriptors; i++) {
- Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
- JSObject::SetAccessor(result, accessor).Assert();
- }
-
- DCHECK(result->shared()->IsApiFunction());
- return result;
+Handle<JSWeakMap> Factory::NewJSWeakMap() {
+ // TODO(adamk): Currently the map is only created three times per
+ // isolate. If it's created more often, the map should be moved into the
+ // strong root list.
+ Handle<Map> map = NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
+ return Handle<JSWeakMap>::cast(NewJSObjectFromMap(map));
}
@@ -2500,21 +2323,6 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
}
-MaybeHandle<FunctionTemplateInfo> Factory::ConfigureInstance(
- Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance) {
- // Configure the instance by adding the properties specified by the
- // instance template.
- Handle<Object> instance_template(desc->instance_template(), isolate());
- if (!instance_template->IsUndefined()) {
- RETURN_ON_EXCEPTION(
- isolate(),
- Execution::ConfigureInstance(isolate(), instance, instance_template),
- FunctionTemplateInfo);
- }
- return desc;
-}
-
-
Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
if (String::Equals(name, undefined_string())) return undefined_value();
if (String::Equals(name, nan_string())) return nan_value();
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 24a66478a3..4dfd98c61c 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -264,10 +264,6 @@ class Factory FINAL {
Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
int aliased_context_slot);
- Handle<DeclaredAccessorDescriptor> NewDeclaredAccessorDescriptor();
-
- Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo();
-
Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo();
Handle<Script> NewScript(Handle<String> source);
@@ -363,6 +359,8 @@ class Factory FINAL {
return NewJSObjectFromMap(neander_map());
}
+ Handle<JSWeakMap> NewJSWeakMap();
+
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
// JS objects are pretenured when allocated by the bootstrapper and
@@ -469,6 +467,8 @@ class Factory FINAL {
void ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> global,
Handle<JSFunction> constructor);
+ Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy();
+
// Change the type of the argument into a JS object/function and reinitialize.
void BecomeJSObject(Handle<JSProxy> object);
void BecomeJSFunction(Handle<JSProxy> object);
@@ -561,25 +561,8 @@ class Factory FINAL {
return NumberToString(NewNumberFromUint(value));
}
- enum ApiInstanceType {
- JavaScriptObjectType,
- GlobalObjectType,
- GlobalProxyType
- };
-
- Handle<JSFunction> CreateApiFunction(
- Handle<FunctionTemplateInfo> data,
- Handle<Object> prototype,
- ApiInstanceType type = JavaScriptObjectType);
-
Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
- // Installs interceptors on the instance. 'desc' is a function template,
- // and instance is an object instance created by the function of this
- // function template.
- MUST_USE_RESULT MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
- Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance);
-
#define ROOT_ACCESSOR(type, name, camel_name) \
inline Handle<type> name() { \
return Handle<type>(bit_cast<type**>( \
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index 198422feef..c151ab1072 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -14,15 +14,10 @@ namespace internal {
inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Map* map) {
DCHECK((offset % kPointerSize) == 0);
int index = offset / kPointerSize;
- if (map == NULL) {
- return FieldIndex(true, index, false, index + 1, 0, true);
- }
- int first_inobject_offset = map->GetInObjectPropertyOffset(0);
- if (offset < first_inobject_offset) {
- return FieldIndex(true, index, false, 0, 0, true);
- } else {
- return FieldIndex::ForPropertyIndex(map, offset / kPointerSize);
- }
+ DCHECK(map == NULL ||
+ index < (map->GetInObjectPropertyOffset(0) / kPointerSize +
+ map->inobject_properties()));
+ return FieldIndex(true, index, false, 0, 0, true);
}
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 348a52e73a..46d8aa94de 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -114,6 +114,11 @@ struct MaybeBoolFlag {
};
#endif
+#ifdef DEBUG
+#define DEBUG_BOOL true
+#else
+#define DEBUG_BOOL false
+#endif
#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
#define ENABLE_VFP3_DEFAULT true
#else
@@ -164,6 +169,11 @@ struct MaybeBoolFlag {
// Flags for language modes and experimental language features.
DEFINE_BOOL(use_strict, false, "enforce strict mode")
+DEFINE_BOOL(use_strong, false, "enforce strong mode")
+DEFINE_IMPLICATION(use_strong, use_strict)
+
+DEFINE_BOOL(strong_mode, false, "experimental strong language mode")
+DEFINE_IMPLICATION(use_strong, strong_mode)
DEFINE_BOOL(es_staging, false, "enable all completed harmony features")
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
@@ -172,29 +182,32 @@ DEFINE_IMPLICATION(harmony, es_staging)
DEFINE_IMPLICATION(es_staging, harmony)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_modules, "harmony modules (implies block scoping)") \
- V(harmony_arrays, "harmony array methods") \
- V(harmony_array_includes, "harmony Array.prototype.includes") \
- V(harmony_regexps, "harmony regular expression extensions") \
- V(harmony_arrow_functions, "harmony arrow functions") \
- V(harmony_proxies, "harmony proxies") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
- V(harmony_unicode, "harmony unicode escapes")
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_modules, "harmony modules (implies block scoping)") \
+ V(harmony_arrays, "harmony array methods") \
+ V(harmony_array_includes, "harmony Array.prototype.includes") \
+ V(harmony_regexps, "harmony regular expression extensions") \
+ V(harmony_arrow_functions, "harmony arrow functions") \
+ V(harmony_proxies, "harmony proxies") \
+ V(harmony_sloppy, "harmony features in sloppy mode") \
+ V(harmony_unicode, "harmony unicode escapes") \
+ V(harmony_unicode_regexps, "harmony unicode regexps") \
+ V(harmony_computed_property_names, "harmony computed property names") \
+ V(harmony_rest_parameters, "harmony rest parameters") \
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
V(harmony_tostring, "harmony toString") \
- V(harmony_classes, \
- "harmony classes (implies block scoping & object literal extension)") \
- V(harmony_object_literals, "harmony object literal extensions")
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING(V) \
V(harmony_numeric_literals, "harmony numeric literals") \
V(harmony_strings, "harmony string methods") \
V(harmony_scoping, "harmony block scoping") \
- V(harmony_templates, "harmony template literals")
+ V(harmony_templates, "harmony template literals") \
+ V(harmony_classes, \
+ "harmony classes (implies block scoping & object literal extension)") \
+ V(harmony_object_literals, "harmony object literal extensions") \
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -224,6 +237,7 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
DEFINE_IMPLICATION(harmony_modules, harmony_scoping)
DEFINE_IMPLICATION(harmony_classes, harmony_scoping)
DEFINE_IMPLICATION(harmony_classes, harmony_object_literals)
+DEFINE_IMPLICATION(harmony_unicode_regexps, harmony_unicode)
// Flags for experimental implementation features.
@@ -384,30 +398,33 @@ DEFINE_BOOL(trace_turbo_scheduler, false, "trace TurboFan's scheduler")
DEFINE_BOOL(trace_turbo_reduction, false, "trace TurboFan's various reducers")
DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
-DEFINE_BOOL(turbo_verify, false, "verify TurboFan graphs at each phase")
+DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
+DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
DEFINE_BOOL(turbo_source_positions, false,
"track source code positions when building TurboFan IR")
+DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
DEFINE_BOOL(context_specialization, false,
"enable context specialization in TurboFan")
DEFINE_BOOL(turbo_deoptimization, false, "enable deoptimization in TurboFan")
DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
-DEFINE_BOOL(turbo_inlining_intrinsics, false,
- "enable inlining of intrinsics in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
-DEFINE_IMPLICATION(turbo_inlining_intrinsics, turbo_inlining)
DEFINE_IMPLICATION(turbo_inlining, turbo_types)
DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
// TODO(dcarney): this is just for experimentation, remove when default.
-DEFINE_BOOL(turbo_reuse_spill_slots, true, "reuse spill slots in TurboFan")
-// TODO(dcarney): this is just for experimentation, remove when default.
DEFINE_BOOL(turbo_delay_ssa_decon, false,
"delay ssa deconstruction in TurboFan register allocator")
-// TODO(dcarney): this is just for debugging, remove eventually.
+DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
+ "verify register allocation in TurboFan")
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
-DEFINE_BOOL(turbo_jt, true, "enable jump threading")
+DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
+DEFINE_BOOL(turbo_osr, false, "enable OSR in TurboFan")
+DEFINE_BOOL(turbo_exceptions, false, "enable exception handling in TurboFan")
+DEFINE_BOOL(turbo_stress_loop_peeling, false,
+ "stress loop peeling optimization")
+DEFINE_BOOL(turbo_switch, true, "optimize switches in TurboFan")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -458,7 +475,8 @@ DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT,
DEFINE_BOOL(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
DEFINE_BOOL(force_long_branches, false,
- "force all emitted branches to be in long mode (MIPS only)")
+ "force all emitted branches to be in long mode (MIPS/PPC only)")
+DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
@@ -596,11 +614,18 @@ DEFINE_BOOL(age_code, true,
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_steps, true, "do incremental marking steps")
+DEFINE_BOOL(overapproximate_weak_closure, false,
+ "overapproximate weak closer to reduce atomic pause time")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
"track object counts and memory usage")
+DEFINE_BOOL(track_detached_contexts, true,
+ "track native contexts that are expected to be garbage collected")
+DEFINE_BOOL(trace_detached_contexts, false,
+ "trace native contexts that are expected to be garbage collected")
+DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
#ifdef VERIFY_HEAP
DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -661,7 +686,8 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
@@ -803,11 +829,6 @@ DEFINE_BOOL(print_global_handles, false, "report global handles after GC")
DEFINE_BOOL(print_turbo_replay, false,
"print C++ code to recreate TurboFan graphs")
-// interface.cc
-DEFINE_BOOL(print_interfaces, false, "print interfaces")
-DEFINE_BOOL(print_interface_details, false, "print interface inference details")
-DEFINE_INT(print_interface_depth, 5, "depth for printing interfaces")
-
// objects.cc
DEFINE_BOOL(trace_normalization, false,
"prints when objects are turned into dictionaries.")
@@ -850,6 +871,8 @@ DEFINE_BOOL(log_snapshot_positions, false,
DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
DEFINE_BOOL(prof, false,
"Log statistical profiling information (implies --log-code).")
+DEFINE_BOOL(prof_cpp, false, "Like --prof, but ignore generated code.")
+DEFINE_IMPLICATION(prof, prof_cpp)
DEFINE_BOOL(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_BOOL(log_regexp, false, "Log regular expression execution.")
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 5e33bdaeec..d498c28240 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -553,6 +553,9 @@ void FlagList::EnforceFlagImplications() {
uint32_t FlagList::Hash() {
std::ostringstream modified_args_as_string;
+#ifdef DEBUG
+ modified_args_as_string << "debug";
+#endif // DEBUG
for (size_t i = 0; i < num_flags; ++i) {
Flag* current = &flags[i];
if (!current->IsDefault()) {
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index d7f2f75d36..824c1a7620 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -17,6 +17,8 @@
#include "src/arm64/frames-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/frames-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/frames-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/frames-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 0ba8ea0020..b7fba653d5 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -754,7 +754,7 @@ int JavaScriptFrame::GetNumberOfIncomingArguments() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- return function()->shared()->formal_parameter_count();
+ return function()->shared()->internal_formal_parameter_count();
}
@@ -1310,7 +1310,7 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
int actual = ComputeParametersCount();
int expected = -1;
JSFunction* function = this->function();
- expected = function->shared()->formal_parameter_count();
+ expected = function->shared()->internal_formal_parameter_count();
PrintIndex(accumulator, mode, index);
accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index cb8f4aafcd..6f43497a79 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -60,10 +60,6 @@ void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
}
-void BreakableStatementChecker::VisitModuleVariable(ModuleVariable* module) {
-}
-
-
void BreakableStatementChecker::VisitModulePath(ModulePath* module) {
}
@@ -351,7 +347,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
#ifdef DEBUG
// Check that no context-specific object has been embedded.
- code->VerifyEmbeddedObjectsInFullCode();
+ code->VerifyEmbeddedObjects(Code::kNoContextSpecificPointers);
#endif // DEBUG
return true;
}
@@ -420,7 +416,7 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
void FullCodeGenerator::Initialize() {
- InitializeAstVisitor(info_->zone());
+ InitializeAstVisitor(info_->isolate(), info_->zone());
// The generation of debug code must match between the snapshot code and the
// code that is generated later. This is assumed by the debugger when it is
// calculating PC offsets after generating a debug version of code. Therefore
@@ -447,7 +443,7 @@ void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
- Handle<Code> ic = CodeFactory::StoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
CallIC(ic, id);
}
@@ -614,15 +610,14 @@ void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
if (module != NULL) {
Comment cmnt(masm_, "[ Link nested modules");
Scope* scope = module->body()->scope();
- Interface* interface = scope->interface();
- DCHECK(interface->IsModule() && interface->IsFrozen());
+ DCHECK(scope->module()->IsFrozen());
- interface->Allocate(scope->module_var()->index());
+ scope->module()->Allocate(scope->module_var()->index());
// Set up module context.
- DCHECK(scope->interface()->Index() >= 0);
- __ Push(Smi::FromInt(scope->interface()->Index()));
- __ Push(scope->GetScopeInfo());
+ DCHECK(scope->module()->Index() >= 0);
+ __ Push(Smi::FromInt(scope->module()->Index()));
+ __ Push(scope->GetScopeInfo(isolate()));
__ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
@@ -725,9 +720,14 @@ void FullCodeGenerator::VisitDeclarations(
AstVisitor::VisitDeclarations(declarations);
if (scope_->num_modules() != 0) {
+ // TODO(ES6): This step, which creates module instance objects,
+ // can probably be delayed until an "import *" declaration
+ // reifies a module instance. Until imports are implemented,
+ // we skip it altogether.
+ //
// Initialize modules from descriptor array.
- DCHECK(module_index_ == modules_->length());
- DeclareModules(modules_);
+ // DCHECK(module_index_ == modules_->length());
+ // DeclareModules(modules_);
modules_ = saved_modules;
module_index_ = saved_module_index;
}
@@ -750,7 +750,7 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
- Interface* interface = scope_->interface();
+ ModuleDescriptor* descriptor = scope_->module();
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
@@ -760,8 +760,8 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
int index = module_index_++;
// Set up module context.
- DCHECK(interface->Index() >= 0);
- __ Push(Smi::FromInt(interface->Index()));
+ DCHECK(descriptor->Index() >= 0);
+ __ Push(Smi::FromInt(descriptor->Index()));
__ Push(Smi::FromInt(0));
__ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
@@ -773,7 +773,7 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
// Populate the module description.
Handle<ModuleInfo> description =
- ModuleInfo::Create(isolate(), interface, scope_);
+ ModuleInfo::Create(isolate(), descriptor, scope_);
modules_->set(index, *description);
scope_ = saved_scope;
@@ -784,40 +784,21 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
}
-void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
- // Nothing to do.
- // The instance object is resolved statically through the module's interface.
-}
-
-
+// TODO(adamk): Delete ModulePath.
void FullCodeGenerator::VisitModulePath(ModulePath* module) {
- // Nothing to do.
- // The instance object is resolved statically through the module's interface.
}
+// TODO(adamk): Delete ModuleUrl.
void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
- // TODO(rossberg): dummy allocation for now.
- Scope* scope = module->body()->scope();
- Interface* interface = scope_->interface();
-
- DCHECK(interface->IsModule() && interface->IsFrozen());
- DCHECK(!modules_.is_null());
- DCHECK(module_index_ < modules_->length());
- interface->Allocate(scope->module_var()->index());
- int index = module_index_++;
-
- Handle<ModuleInfo> description =
- ModuleInfo::Create(isolate(), interface, scope_);
- modules_->set(index, *description);
}
int FullCodeGenerator::DeclareGlobalsFlags() {
- DCHECK(DeclareGlobalsStrictMode::is_valid(strict_mode()));
+ DCHECK(DeclareGlobalsLanguageMode::is_valid(language_mode()));
return DeclareGlobalsEvalFlag::encode(is_eval()) |
- DeclareGlobalsNativeFlag::encode(is_native()) |
- DeclareGlobalsStrictMode::encode(strict_mode());
+ DeclareGlobalsNativeFlag::encode(is_native()) |
+ DeclareGlobalsLanguageMode::encode(language_mode());
}
@@ -837,7 +818,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker(zone());
+ BreakableStatementChecker checker(info_->isolate(), zone());
checker.Check(stmt);
// Record the statement position right here if the statement is not
// breakable. For breakable statements the actual recording of the
@@ -858,13 +839,29 @@ void FullCodeGenerator::VisitSuperReference(SuperReference* super) {
}
+bool FullCodeGenerator::ValidateSuperCall(Call* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ if (new_target_var == nullptr) {
+ // TODO(dslomov): this is not exactly correct, the spec requires us
+ // to execute the constructor and only fail when an assigment to 'this'
+ // is attempted. Will implement once we have general new.target support,
+ // but also filed spec bug 3843 to make it an early error.
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ RecordJSReturnSite(expr);
+ context()->Plug(result_register());
+ return false;
+ }
+ return true;
+}
+
+
void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
if (!info_->is_debug()) {
CodeGenerator::RecordPositions(masm_, expr->position());
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker(zone());
+ BreakableStatementChecker checker(info_->isolate(), zone());
checker.Check(expr);
// Record a statement position right here if the expression is not
// breakable. For breakable expressions the actual recording of the
@@ -1052,19 +1049,15 @@ void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
Comment cmnt(masm_, "[ ArithmeticExpression");
Expression* left = expr->left();
Expression* right = expr->right();
- OverwriteMode mode =
- left->ResultOverwriteAllowed()
- ? OVERWRITE_LEFT
- : (right->ResultOverwriteAllowed() ? OVERWRITE_RIGHT : NO_OVERWRITE);
VisitForStackValue(left);
VisitForAccumulatorValue(right);
SetSourcePosition(expr->position());
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr, op, mode, left, right);
+ EmitInlineSmiBinaryOp(expr, op, left, right);
} else {
- EmitBinaryOp(expr, op, mode);
+ EmitBinaryOp(expr, op);
}
}
@@ -1086,7 +1079,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
Comment cmnt(masm_, "[ Module context");
- __ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
+ DCHECK(stmt->body()->scope()->is_module_scope());
+
+ __ Push(Smi::FromInt(stmt->body()->scope()->module()->Index()));
__ Push(Smi::FromInt(0));
__ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(
@@ -1211,6 +1206,15 @@ void FullCodeGenerator::EmitUnwindBeforeReturn() {
}
+void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
+ BailoutId bailout_id) {
+ VisitForStackValue(property->key());
+ __ InvokeBuiltin(Builtins::TO_NAME, CALL_FUNCTION);
+ PrepareForBailoutForId(bailout_id, NO_REGISTERS);
+ __ Push(result_register());
+}
+
+
void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
@@ -1229,6 +1233,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushWithContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
Scope* saved_scope = scope();
scope_ = stmt->scope();
@@ -1582,8 +1587,7 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
{
EnterBlockScopeIfNeeded block_scope_state(
- this, lit->scope(), BailoutId::None(), BailoutId::None(),
- BailoutId::None());
+ this, lit->scope(), lit->EntryId(), lit->DeclsId(), lit->ExitId());
if (lit->raw_name() != NULL) {
__ Push(lit->name());
@@ -1642,8 +1646,8 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
// Copy the function data to the shared function info.
shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->formal_parameter_count();
- shared->set_formal_parameter_count(parameters);
+ int parameters = fun->shared()->internal_formal_parameter_count();
+ shared->set_internal_formal_parameter_count(parameters);
EmitNewClosure(shared, false);
}
@@ -1791,7 +1795,7 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
codegen_->scope_ = scope;
{
Comment cmnt(masm(), "[ Extend block context");
- __ Push(scope->GetScopeInfo());
+ __ Push(scope->GetScopeInfo(codegen->isolate()));
codegen_->PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushBlockContext, 2);
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 1439942db8..72d343409e 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -29,8 +29,9 @@ class JumpPatchSite;
// debugger to piggybag on.
class BreakableStatementChecker: public AstVisitor {
public:
- explicit BreakableStatementChecker(Zone* zone) : is_breakable_(false) {
- InitializeAstVisitor(zone);
+ BreakableStatementChecker(Isolate* isolate, Zone* zone)
+ : is_breakable_(false) {
+ InitializeAstVisitor(isolate, zone);
}
void Check(Statement* stmt);
@@ -101,23 +102,21 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code size multiplier.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
static const int kCodeSizeMultiplier = 105;
- static const int kBootCodeSizeMultiplier = 100;
#elif V8_TARGET_ARCH_X64
static const int kCodeSizeMultiplier = 170;
- static const int kBootCodeSizeMultiplier = 140;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 149;
- static const int kBootCodeSizeMultiplier = 110;
#elif V8_TARGET_ARCH_ARM64
// TODO(all): Copied ARM value. Check this is sensible for ARM64.
static const int kCodeSizeMultiplier = 149;
- static const int kBootCodeSizeMultiplier = 110;
+#elif V8_TARGET_ARCH_PPC64
+ static const int kCodeSizeMultiplier = 200;
+#elif V8_TARGET_ARCH_PPC
+ static const int kCodeSizeMultiplier = 200;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 149;
- static const int kBootCodeSizeMultiplier = 120;
#elif V8_TARGET_ARCH_MIPS64
static const int kCodeSizeMultiplier = 149;
- static const int kBootCodeSizeMultiplier = 120;
#else
#error Unsupported target architecture.
#endif
@@ -330,12 +329,15 @@ class FullCodeGenerator: public AstVisitor {
Label* if_true,
Label* if_false,
Label* fall_through);
-#else // All non-mips arch.
+#elif V8_TARGET_ARCH_PPC
+ void Split(Condition cc, Label* if_true, Label* if_false, Label* fall_through,
+ CRegister cr = cr7);
+#else // All other arch.
void Split(Condition cc,
Label* if_true,
Label* if_false,
Label* fall_through);
-#endif // V8_TARGET_ARCH_MIPS
+#endif
// Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into
// a register. Emits a context chain walk if if necessary (so does
@@ -487,6 +489,7 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code sequences for calls
void EmitCall(Call* expr, CallICState::CallType = CallICState::FUNCTION);
+ void EmitSuperConstructorCall(Call* expr);
void EmitCallWithLoadIC(Call* expr);
void EmitSuperCallWithLoadIC(Call* expr);
void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
@@ -568,17 +571,17 @@ class FullCodeGenerator: public AstVisitor {
// in the accumulator after installing all the properties.
void EmitClassDefineProperties(ClassLiteral* lit);
+ // Pushes the property key as a Name on the stack.
+ void EmitPropertyKey(ObjectLiteralProperty* property, BailoutId bailout_id);
+
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
- void EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode);
+ void EmitBinaryOp(BinaryOperation* expr, Token::Value op);
// Helper functions for generating inlined smi code for certain
// binary operations.
void EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left,
Expression* right);
@@ -589,11 +592,11 @@ class FullCodeGenerator: public AstVisitor {
// Shall an error be thrown if assignment with 'op' operation is perfomed
// on this variable in given language mode?
static bool IsSignallingAssignmentToConst(Variable* var, Token::Value op,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
if (var->mode() == CONST) return op != Token::INIT_CONST;
if (var->mode() == CONST_LEGACY) {
- return strict_mode == STRICT && op != Token::INIT_CONST_LEGACY;
+ return is_strict(language_mode) && op != Token::INIT_CONST_LEGACY;
}
return false;
@@ -636,7 +639,8 @@ class FullCodeGenerator: public AstVisitor {
// |offset| is the offset in the stack where the home object can be found.
void EmitSetHomeObjectIfNeeded(Expression* initializer, int offset);
- void EmitLoadSuperConstructor(SuperReference* expr);
+ void EmitLoadSuperConstructor();
+ bool ValidateSuperCall(Call* expr);
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
@@ -672,7 +676,8 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
- StrictMode strict_mode() { return function()->strict_mode(); }
+ LanguageMode language_mode() { return function()->language_mode(); }
+ bool is_simple_parameter_list() { return info_->is_simple_parameter_list(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index a19fb51dd7..69b48d6644 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1918,7 +1918,7 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
#ifdef __MACH_O
- Zone zone(isolate);
+ Zone zone;
MachO mach_o(&zone);
Writer w(&mach_o);
@@ -1930,7 +1930,7 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
#else
- Zone zone(isolate);
+ Zone zone;
ELF elf(&zone);
Writer w(&elf);
@@ -2127,7 +2127,7 @@ void GDBJITInterface::RemoveCode(Code* code) {
void GDBJITInterface::RemoveCodeRange(Address start, Address end) {
HashMap* entries = GetEntries();
- Zone zone(Isolate::Current());
+ Zone zone;
ZoneList<Code*> dead_codes(1, &zone);
for (HashMap::Entry* e = entries->Start(); e != NULL; e = entries->Next(e)) {
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 4f744d6dfb..277cad6c3c 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -203,7 +203,6 @@ class GlobalHandles::Node {
// Callback parameter accessors.
void set_parameter(void* parameter) {
DCHECK(IsInUse());
- DCHECK(weakness_type() == NORMAL_WEAK || weakness_type() == PHANTOM_WEAK);
parameter_or_next_free_.parameter = parameter;
}
void* parameter() const {
@@ -211,30 +210,6 @@ class GlobalHandles::Node {
return parameter_or_next_free_.parameter;
}
- void set_internal_fields(int internal_field_index1,
- int internal_field_index2) {
- DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
- // These are stored in an int16_t.
- DCHECK(internal_field_index1 < 1 << 16);
- DCHECK(internal_field_index1 >= -(1 << 16));
- DCHECK(internal_field_index2 < 1 << 16);
- DCHECK(internal_field_index2 >= -(1 << 16));
- parameter_or_next_free_.internal_field_indeces.internal_field1 =
- static_cast<int16_t>(internal_field_index1);
- parameter_or_next_free_.internal_field_indeces.internal_field2 =
- static_cast<int16_t>(internal_field_index2);
- }
-
- int internal_field1() const {
- DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
- return parameter_or_next_free_.internal_field_indeces.internal_field1;
- }
-
- int internal_field2() const {
- DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
- return parameter_or_next_free_.internal_field_indeces.internal_field2;
- }
-
// Accessors for next free node in the free list.
Node* next_free() {
DCHECK(state() == FREE);
@@ -255,23 +230,22 @@ class GlobalHandles::Node {
weak_callback_ = weak_callback;
}
- void MakePhantom(void* parameter,
- PhantomCallbackData<void>::Callback phantom_callback,
- int16_t internal_field_index1,
- int16_t internal_field_index2) {
+ void MakePhantom(void* parameter, int number_of_internal_fields,
+ PhantomCallbackData<void>::Callback phantom_callback) {
+ DCHECK(number_of_internal_fields >= 0);
+ DCHECK(number_of_internal_fields <= 2);
DCHECK(phantom_callback != NULL);
DCHECK(IsInUse());
CHECK(object_ != NULL);
set_state(WEAK);
- if (parameter == NULL) {
- set_weakness_type(INTERNAL_FIELDS_WEAK);
- set_internal_fields(internal_field_index1, internal_field_index2);
+ if (number_of_internal_fields == 0) {
+ set_weakness_type(PHANTOM_WEAK_0_INTERNAL_FIELDS);
+ } else if (number_of_internal_fields == 1) {
+ set_weakness_type(PHANTOM_WEAK_1_INTERNAL_FIELDS);
} else {
- DCHECK(internal_field_index1 == v8::Object::kNoInternalFieldIndex);
- DCHECK(internal_field_index2 == v8::Object::kNoInternalFieldIndex);
- set_weakness_type(PHANTOM_WEAK);
- set_parameter(parameter);
+ set_weakness_type(PHANTOM_WEAK_2_INTERNAL_FIELDS);
}
+ set_parameter(parameter);
weak_callback_ = reinterpret_cast<WeakCallback>(phantom_callback);
}
@@ -284,63 +258,51 @@ class GlobalHandles::Node {
}
void CollectPhantomCallbackData(
- Isolate* isolate, List<PendingPhantomCallback>* pending_phantom_callbacks,
- List<PendingInternalFieldsCallback>* pending_internal_fields_callbacks) {
- if (state() != Node::PENDING) return;
- bool do_release = true;
+ Isolate* isolate,
+ List<PendingPhantomCallback>* pending_phantom_callbacks) {
+ if (state() != PENDING) return;
if (weak_callback_ != NULL) {
if (weakness_type() == NORMAL_WEAK) return;
v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- if (weakness_type() == PHANTOM_WEAK) {
- // Phantom weak pointer case. Zap with harmless value.
- DCHECK(*location() == Smi::FromInt(0));
- typedef PhantomCallbackData<void> Data;
-
- Data data(api_isolate, parameter());
- Data::Callback callback =
- reinterpret_cast<Data::Callback>(weak_callback_);
+ DCHECK(weakness_type() == PHANTOM_WEAK_0_INTERNAL_FIELDS ||
+ weakness_type() == PHANTOM_WEAK_1_INTERNAL_FIELDS ||
+ weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
- pending_phantom_callbacks->Add(
- PendingPhantomCallback(this, data, callback));
-
- // Postpone the release of the handle. The embedder can't use the
- // handle (it's zapped), but it may be using the location, and we
- // don't want to confuse things by reusing that.
- do_release = false;
- } else {
- DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
- typedef InternalFieldsCallbackData<void, void> Data;
-
- // Phantom weak pointer case, passing internal fields instead of
- // parameter. Don't use a handle here during GC, because it will
- // create a handle pointing to a dying object, which can confuse
- // the next GC.
+ Object* internal_field0 = nullptr;
+ Object* internal_field1 = nullptr;
+ if (weakness_type() != PHANTOM_WEAK_0_INTERNAL_FIELDS) {
JSObject* jsobject = reinterpret_cast<JSObject*>(object());
DCHECK(jsobject->IsJSObject());
- Data data(api_isolate, jsobject->GetInternalField(internal_field1()),
- jsobject->GetInternalField(internal_field2()));
- Data::Callback callback =
- reinterpret_cast<Data::Callback>(weak_callback_);
-
- // In the future, we want to delay the callback. In that case we will
- // zap when we queue up, to stop the C++ side accessing the dead V8
- // object, but we will call Release only after the callback (allowing
- // the node to be reused).
- pending_internal_fields_callbacks->Add(
- PendingInternalFieldsCallback(data, callback));
+ DCHECK(jsobject->GetInternalFieldCount() >= 1);
+ internal_field0 = jsobject->GetInternalField(0);
+ if (weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS) {
+ DCHECK(jsobject->GetInternalFieldCount() >= 2);
+ internal_field1 = jsobject->GetInternalField(1);
+ }
}
+
+ // Zap with harmless value.
+ *location() = Smi::FromInt(0);
+ typedef PhantomCallbackData<void> Data;
+
+ if (!internal_field0->IsSmi()) internal_field0 = nullptr;
+ if (!internal_field1->IsSmi()) internal_field1 = nullptr;
+
+ Data data(api_isolate, parameter(), internal_field0, internal_field1);
+ Data::Callback callback =
+ reinterpret_cast<Data::Callback>(weak_callback_);
+
+ pending_phantom_callbacks->Add(
+ PendingPhantomCallback(this, data, callback));
+ DCHECK(IsInUse());
+ set_state(NEAR_DEATH);
}
- // TODO(erikcorry): At the moment the callbacks are not postponed much,
- // but if we really postpone them until after the mutator has run, we
- // need to divide things up, so that an early callback clears the handle,
- // while a later one destroys the objects involved, possibley triggering
- // some work when decremented ref counts hit zero.
- if (do_release) Release();
}
bool PostGarbageCollectionProcessing(Isolate* isolate) {
+ // Handles only weak handles (not phantom) that are dying.
if (state() != Node::PENDING) return false;
if (weak_callback_ == NULL) {
Release();
@@ -354,11 +316,11 @@ class GlobalHandles::Node {
ExternalOneByteString::cast(object_)->resource() != NULL);
DCHECK(!object_->IsExternalTwoByteString() ||
ExternalTwoByteString::cast(object_)->resource() != NULL);
+ if (weakness_type() != NORMAL_WEAK) return false;
+
// Leaving V8.
VMState<EXTERNAL> vmstate(isolate);
HandleScope handle_scope(isolate);
- if (weakness_type() == PHANTOM_WEAK) return false;
- DCHECK(weakness_type() == NORMAL_WEAK);
Object** object = location();
Handle<Object> handle(*object, isolate);
v8::WeakCallbackData<v8::Value, void> data(
@@ -410,10 +372,6 @@ class GlobalHandles::Node {
// the free list link.
union {
void* parameter;
- struct {
- int16_t internal_field1;
- int16_t internal_field2;
- } internal_field_indeces;
Node* next_free;
} parameter_or_next_free_;
@@ -607,29 +565,27 @@ void GlobalHandles::MakeWeak(Object** location, void* parameter,
typedef PhantomCallbackData<void>::Callback GenericCallback;
-void GlobalHandles::MakePhantom(
- Object** location,
- v8::InternalFieldsCallbackData<void, void>::Callback phantom_callback,
- int16_t internal_field_index1, int16_t internal_field_index2) {
- Node::FromLocation(location)
- ->MakePhantom(NULL, reinterpret_cast<GenericCallback>(phantom_callback),
- internal_field_index1, internal_field_index2);
-}
-
-
void GlobalHandles::MakePhantom(Object** location, void* parameter,
+ int number_of_internal_fields,
GenericCallback phantom_callback) {
- Node::FromLocation(location)->MakePhantom(parameter, phantom_callback,
- v8::Object::kNoInternalFieldIndex,
- v8::Object::kNoInternalFieldIndex);
+ Node::FromLocation(location)
+ ->MakePhantom(parameter, number_of_internal_fields, phantom_callback);
}
-void GlobalHandles::CollectPhantomCallbackData() {
+void GlobalHandles::CollectAllPhantomCallbackData() {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
- node->CollectPhantomCallbackData(isolate(), &pending_phantom_callbacks_,
- &pending_internal_fields_callbacks_);
+ node->CollectPhantomCallbackData(isolate(), &pending_phantom_callbacks_);
+ }
+}
+
+
+void GlobalHandles::CollectYoungPhantomCallbackData() {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ DCHECK(node->is_in_new_space_list());
+ node->CollectPhantomCallbackData(isolate(), &pending_phantom_callbacks_);
}
}
@@ -668,22 +624,22 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
if (node->IsWeakRetainer()) {
- // Weakness type can be normal, phantom or internal fields.
- // For normal weakness we mark through the handle so that
- // the object and things reachable from it are available
- // to the callback.
- // In the case of phantom we can zap the object handle now
- // and we won't need it, so we don't need to mark through it.
+ // Weakness type can be normal or phantom, with or without internal
+ // fields). For normal weakness we mark through the handle so that the
+ // object and things reachable from it are available to the callback.
+ //
+ // In the case of phantom with no internal fields, we can zap the object
+ // handle now and we won't need it, so we don't need to mark through it.
// In the internal fields case we will need the internal
- // fields, so we can't zap the handle, but we don't need to
- // mark through it, because it will die in this GC round.
+ // fields, so we can't zap the handle.
if (node->state() == Node::PENDING) {
- if (node->weakness_type() == PHANTOM_WEAK) {
+ if (node->weakness_type() == PHANTOM_WEAK_0_INTERNAL_FIELDS) {
*(node->location()) = Smi::FromInt(0);
} else if (node->weakness_type() == NORMAL_WEAK) {
v->VisitPointer(node->location());
} else {
- DCHECK(node->weakness_type() == INTERNAL_FIELDS_WEAK);
+ DCHECK(node->weakness_type() == PHANTOM_WEAK_1_INTERNAL_FIELDS ||
+ node->weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
}
} else {
// Node is not pending, so that means the object survived. We still
@@ -736,12 +692,13 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
DCHECK(node->is_in_new_space_list());
if ((node->is_independent() || node->is_partially_dependent()) &&
node->IsWeakRetainer()) {
- if (node->weakness_type() == PHANTOM_WEAK) {
+ if (node->weakness_type() == PHANTOM_WEAK_0_INTERNAL_FIELDS) {
*(node->location()) = Smi::FromInt(0);
} else if (node->weakness_type() == NORMAL_WEAK) {
v->VisitPointer(node->location());
} else {
- DCHECK(node->weakness_type() == INTERNAL_FIELDS_WEAK);
+ DCHECK(node->weakness_type() == PHANTOM_WEAK_1_INTERNAL_FIELDS ||
+ node->weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
// For this case we only need to trace if it's alive: The tracing of
// something that is already alive is just to get the pointer updated
// to the new location of the object).
@@ -886,13 +843,9 @@ int GlobalHandles::DispatchPendingPhantomCallbacks() {
int freed_nodes = 0;
while (pending_phantom_callbacks_.length() != 0) {
PendingPhantomCallback callback = pending_phantom_callbacks_.RemoveLast();
+ DCHECK(callback.node()->IsInUse());
callback.invoke();
- freed_nodes++;
- }
- while (pending_internal_fields_callbacks_.length() != 0) {
- PendingInternalFieldsCallback callback =
- pending_internal_fields_callbacks_.RemoveLast();
- callback.invoke();
+ DCHECK(!callback.node()->IsInUse());
freed_nodes++;
}
return freed_nodes;
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index b3756d0e62..767989c77e 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -98,9 +98,15 @@ struct ObjectGroupRetainerInfo {
enum WeaknessType {
- NORMAL_WEAK, // Embedder gets a handle to the dying object.
- PHANTOM_WEAK, // Embedder gets the parameter they passed in earlier.
- INTERNAL_FIELDS_WEAK // Embedder gets 2 internal fields from dying object.
+ NORMAL_WEAK, // Embedder gets a handle to the dying object.
+ // In the following cases, the embedder gets the parameter they passed in
+ // earlier, and the 0, 1 or 2 first internal fields. Note that the internal
+ // fields must contain aligned non-V8 pointers. Getting pointers to V8
+ // objects through this interface would be GC unsafe so in that case the
+ // embedder gets a null pointer instead.
+ PHANTOM_WEAK_0_INTERNAL_FIELDS,
+ PHANTOM_WEAK_1_INTERNAL_FIELDS,
+ PHANTOM_WEAK_2_INTERNAL_FIELDS
};
@@ -140,14 +146,9 @@ class GlobalHandles {
// It would be nice to template this one, but it's really hard to get
// the template instantiator to work right if you do.
static void MakePhantom(Object** location, void* parameter,
+ int number_of_internal_fields,
PhantomCallbackData<void>::Callback weak_callback);
- static void MakePhantom(
- Object** location,
- v8::InternalFieldsCallbackData<void, void>::Callback weak_callback,
- int16_t internal_field_index1,
- int16_t internal_field_index2 = v8::Object::kNoInternalFieldIndex);
-
void RecordStats(HeapStats* stats);
// Returns the current number of weak handles.
@@ -164,7 +165,11 @@ class GlobalHandles {
// Collect up data for the weak handle callbacks after GC has completed, but
// before memory is reclaimed.
- void CollectPhantomCallbackData();
+ void CollectAllPhantomCallbackData();
+
+ // Collect up data for the weak handle callbacks referenced by young
+ // generation after GC has completed, but before memory is reclaimed.
+ void CollectYoungPhantomCallbackData();
// Clear the weakness of a global handle.
static void* ClearWeakness(Object** location);
@@ -302,7 +307,6 @@ class GlobalHandles {
class NodeBlock;
class NodeIterator;
class PendingPhantomCallback;
- class PendingInternalFieldsCallback;
Isolate* isolate_;
@@ -336,7 +340,6 @@ class GlobalHandles {
List<ObjectGroupConnection> implicit_ref_connections_;
List<PendingPhantomCallback> pending_phantom_callbacks_;
- List<PendingInternalFieldsCallback> pending_internal_fields_callbacks_;
friend class Isolate;
@@ -361,20 +364,6 @@ class GlobalHandles::PendingPhantomCallback {
};
-class GlobalHandles::PendingInternalFieldsCallback {
- public:
- typedef InternalFieldsCallbackData<void, void> Data;
- PendingInternalFieldsCallback(Data data, Data::Callback callback)
- : data_(data), callback_(callback) {}
-
- void invoke() { callback_(data_); }
-
- private:
- Data data_;
- Data::Callback callback_;
-};
-
-
class EternalHandles {
public:
enum SingletonHandle {
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 52ec2aaaa4..32396d89ea 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -22,13 +22,15 @@
# define V8_INFINITY std::numeric_limits<double>::infinity()
#elif V8_LIBC_MSVCRT
# define V8_INFINITY HUGE_VAL
+#elif V8_OS_AIX
+#define V8_INFINITY (__builtin_inff())
#else
# define V8_INFINITY INFINITY
#endif
#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
#define V8_TURBOFAN_BACKEND 1
#else
#define V8_TURBOFAN_BACKEND 0
@@ -59,6 +61,9 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
@@ -83,7 +88,7 @@ namespace internal {
// Determine whether double field unboxing feature is enabled.
-#if (V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64)
+#if V8_TARGET_ARCH_64_BIT
#define V8_DOUBLE_FIELDS_UNBOXING 0
#else
#define V8_DOUBLE_FIELDS_UNBOXING 0
@@ -220,7 +225,47 @@ template <typename T, class P = FreeStoreAllocationPolicy> class List;
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum StrictMode { SLOPPY, STRICT };
+enum LanguageMode {
+ // LanguageMode is expressed as a bitmask. Descriptions of the bits:
+ STRICT_BIT = 1 << 0,
+ STRONG_BIT = 1 << 1,
+ LANGUAGE_END,
+
+ // Shorthands for some common language modes.
+ SLOPPY = 0,
+ STRICT = STRICT_BIT,
+ STRONG = STRICT_BIT | STRONG_BIT
+};
+
+
+inline bool is_sloppy(LanguageMode language_mode) {
+ return (language_mode & STRICT_BIT) == 0;
+}
+
+
+inline bool is_strict(LanguageMode language_mode) {
+ return language_mode & STRICT_BIT;
+}
+
+
+inline bool is_strong(LanguageMode language_mode) {
+ return language_mode & STRONG_BIT;
+}
+
+
+inline bool is_valid_language_mode(int language_mode) {
+ return language_mode == SLOPPY || language_mode == STRICT ||
+ language_mode == STRONG;
+}
+
+
+inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
+ int language_mode = 0;
+ if (strict_bit) language_mode |= STRICT_BIT;
+ if (strong_bit) language_mode |= STRONG_BIT;
+ DCHECK(is_valid_language_mode(language_mode));
+ return static_cast<LanguageMode>(language_mode);
+}
// Mask for the sign bit in a smi.
@@ -328,7 +373,6 @@ class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
-class LookupResult;
class MacroAssembler;
class Map;
class MapSpace;
@@ -482,9 +526,11 @@ enum CallFunctionFlags {
enum CallConstructorFlags {
- NO_CALL_CONSTRUCTOR_FLAGS,
+ NO_CALL_CONSTRUCTOR_FLAGS = 0,
// The call target is cached in the instruction stream.
- RECORD_CONSTRUCTOR_TARGET
+ RECORD_CONSTRUCTOR_TARGET = 1,
+ SUPER_CONSTRUCTOR_CALL = 1 << 1,
+ SUPER_CALL_RECORD_TARGET = SUPER_CONSTRUCTOR_CALL | RECORD_CONSTRUCTOR_TARGET
};
@@ -562,9 +608,6 @@ struct AccessorDescriptor {
#define HAS_SMI_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
-#define HAS_FAILURE_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-
// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
@@ -606,6 +649,7 @@ enum CpuFeature {
SAHF,
AVX,
FMA3,
+ ATOM,
// ARM
VFP3,
ARMv7,
@@ -625,6 +669,10 @@ enum CpuFeature {
// ARM64
ALWAYS_ALIGN_CSP,
COHERENT_CACHE,
+ // PPC
+ FPR_GPR_MOV,
+ LWSYNC,
+ ISELECT,
NUMBER_OF_CPU_FEATURES
};
@@ -649,14 +697,11 @@ enum ScopeType {
};
-const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
-const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
-const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
+const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
const uint64_t kHoleNanInt64 =
(static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
-const uint64_t kLastNonNaNInt64 =
- (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
// The order of this enum has to be kept in sync with the predicates below.
@@ -666,12 +711,10 @@ enum VariableMode {
CONST_LEGACY, // declared via legacy 'const' declarations
- LET, // declared via 'let' declarations (first lexical)
+ LET, // declared via 'let' declarations
CONST, // declared via 'const' declarations
- MODULE, // declared via 'module' declaration (last lexical)
-
// Variables introduced by the compiler:
INTERNAL, // like VAR, but not user-visible (may or may not
// be in a context)
@@ -699,17 +742,17 @@ inline bool IsDynamicVariableMode(VariableMode mode) {
inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= MODULE;
+ return mode >= VAR && mode <= CONST;
}
inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= MODULE;
+ return mode == LET || mode == CONST;
}
inline bool IsImmutableVariableMode(VariableMode mode) {
- return (mode >= CONST && mode <= MODULE) || mode == CONST_LEGACY;
+ return mode == CONST || mode == CONST_LEGACY;
}
@@ -770,11 +813,16 @@ enum Signedness { kSigned, kUnsigned };
enum FunctionKind {
kNormalFunction = 0,
- kArrowFunction = 1,
- kGeneratorFunction = 2,
- kConciseMethod = 4,
+ kArrowFunction = 1 << 0,
+ kGeneratorFunction = 1 << 1,
+ kConciseMethod = 1 << 2,
kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
- kDefaultConstructor = 8
+ kAccessorFunction = 1 << 3,
+ kDefaultConstructor = 1 << 4,
+ kSubclassConstructor = 1 << 5,
+ kBaseConstructor = 1 << 6,
+ kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
+ kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor
};
@@ -784,7 +832,11 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
kind == FunctionKind::kGeneratorFunction ||
kind == FunctionKind::kConciseMethod ||
kind == FunctionKind::kConciseGeneratorMethod ||
- kind == FunctionKind::kDefaultConstructor;
+ kind == FunctionKind::kAccessorFunction ||
+ kind == FunctionKind::kDefaultBaseConstructor ||
+ kind == FunctionKind::kDefaultSubclassConstructor ||
+ kind == FunctionKind::kBaseConstructor ||
+ kind == FunctionKind::kSubclassConstructor;
}
@@ -806,12 +858,36 @@ inline bool IsConciseMethod(FunctionKind kind) {
}
+inline bool IsAccessorFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kAccessorFunction;
+}
+
+
inline bool IsDefaultConstructor(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
return kind & FunctionKind::kDefaultConstructor;
}
+inline bool IsBaseConstructor(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kBaseConstructor;
+}
+
+
+inline bool IsSubclassConstructor(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kSubclassConstructor;
+}
+
+
+inline bool IsConstructor(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind &
+ (FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
+ FunctionKind::kDefaultConstructor);
+}
} } // namespace v8::internal
namespace i = v8::internal;
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index 5d1262a06d..72625a57f5 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -142,33 +142,49 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
}
}
- var iterable = ToIterable(items);
+ var iterable = GetMethod(items, symbolIterator);
var k;
var result;
var mappedValue;
var nextValue;
if (!IS_UNDEFINED(iterable)) {
- result = IS_SPEC_FUNCTION(this) && this.prototype ? new this() : [];
+ result = %IsConstructor(this) ? new this() : [];
+
+ var iterator = GetIterator(items, iterable);
k = 0;
- for (nextValue of items) {
- if (mapping) mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
- else mappedValue = nextValue;
+ while (true) {
+ var next = iterator.next();
+
+ if (!IS_OBJECT(next)) {
+ throw MakeTypeError("iterator_result_not_an_object", [next]);
+ }
+
+ if (next.done) {
+ result.length = k;
+ return result;
+ }
+
+ nextValue = next.value;
+ if (mapping) {
+ mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
+ } else {
+ mappedValue = nextValue;
+ }
%AddElement(result, k++, mappedValue, NONE);
}
-
- result.length = k;
- return result;
} else {
var len = ToLength(items.length);
- result = IS_SPEC_FUNCTION(this) && this.prototype ? new this(len) :
- new $Array(len);
+ result = %IsConstructor(this) ? new this(len) : new $Array(len);
for (k = 0; k < len; ++k) {
nextValue = items[k];
- if (mapping) mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
- else mappedValue = nextValue;
+ if (mapping) {
+ mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
+ } else {
+ mappedValue = nextValue;
+ }
%AddElement(result, k, mappedValue, NONE);
}
@@ -182,7 +198,7 @@ function ArrayOf() {
var length = %_ArgumentsLength();
var constructor = this;
// TODO: Implement IsConstructor (ES6 section 7.2.5)
- var array = IS_SPEC_FUNCTION(constructor) ? new constructor(length) : [];
+ var array = %IsConstructor(constructor) ? new constructor(length) : [];
for (var i = 0; i < length; i++) {
%AddElement(array, i, %_Arguments(i), NONE);
}
diff --git a/deps/v8/src/harmony-classes.js b/deps/v8/src/harmony-classes.js
deleted file mode 100644
index ac0675862e..0000000000
--- a/deps/v8/src/harmony-classes.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// This file relies on the fact that the following declarations have been made
-// in runtime.js:
-// var $Function = global.Function;
-// var $Array = global.Array;
-
-"use strict";
-
-function FunctionToMethod(homeObject) {
- if (!IS_SPEC_FUNCTION(this)) {
- throw MakeTypeError('toMethod_non_function',
- [%ToString(this), typeof this]);
-
- }
-
- if (!IS_SPEC_OBJECT(homeObject)) {
- throw MakeTypeError('toMethod_non_object',
- [%ToString(homeObject)]);
- }
-
- return %ToMethod(this, homeObject);
-}
-
-function SetupHarmonyClasses() {
- %CheckIsBootstrapping();
-
- InstallFunctions($Function.prototype, DONT_ENUM, $Array(
- "toMethod", FunctionToMethod
- ));
-}
-
-SetupHarmonyClasses();
diff --git a/deps/v8/src/harmony-tostring.js b/deps/v8/src/harmony-tostring.js
index 0336456bb2..aed8ca0399 100644
--- a/deps/v8/src/harmony-tostring.js
+++ b/deps/v8/src/harmony-tostring.js
@@ -9,19 +9,6 @@
// var $Object = global.Object;
// var $Symbol = global.Symbol;
-var kBuiltinStringTags = {
- "__proto__": null,
- "Arguments": true,
- "Array": true,
- "Boolean": true,
- "Date": true,
- "Error": true,
- "Function": true,
- "Number": true,
- "RegExp": true,
- "String": true
-};
-
DefaultObjectToString = ObjectToStringHarmony;
// ES6 draft 08-24-14, section 19.1.3.6
function ObjectToStringHarmony() {
@@ -30,12 +17,8 @@ function ObjectToStringHarmony() {
var O = ToObject(this);
var builtinTag = %_ClassOf(O);
var tag = O[symbolToStringTag];
- if (IS_UNDEFINED(tag)) {
+ if (!IS_STRING(tag)) {
tag = builtinTag;
- } else if (!IS_STRING(tag)) {
- return "[object ???]"
- } else if (tag !== builtinTag && kBuiltinStringTags[tag]) {
- return "[object ~" + tag + "]";
}
return "[object " + tag + "]";
}
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index 196eb130a5..8e185184ce 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -1422,13 +1422,7 @@ void V8HeapExplorer::ExtractAccessorInfoReferences(
SetInternalReference(accessor_info, entry, "expected_receiver_type",
accessor_info->expected_receiver_type(),
AccessorInfo::kExpectedReceiverTypeOffset);
- if (accessor_info->IsDeclaredAccessorInfo()) {
- DeclaredAccessorInfo* declared_accessor_info =
- DeclaredAccessorInfo::cast(accessor_info);
- SetInternalReference(declared_accessor_info, entry, "descriptor",
- declared_accessor_info->descriptor(),
- DeclaredAccessorInfo::kDescriptorOffset);
- } else if (accessor_info->IsExecutableAccessorInfo()) {
+ if (accessor_info->IsExecutableAccessorInfo()) {
ExecutableAccessorInfo* executable_accessor_info =
ExecutableAccessorInfo::cast(accessor_info);
SetInternalReference(executable_accessor_info, entry, "getter",
@@ -1628,7 +1622,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.location()) {
- case IN_OBJECT: {
+ case kField: {
Representation r = details.representation();
if (r.IsSmi() || r.IsDouble()) break;
@@ -1648,7 +1642,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
}
break;
}
- case IN_DESCRIPTOR:
+ case kDescriptor:
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
descs->GetKey(i),
descs->GetValue(i));
@@ -2037,7 +2031,7 @@ void V8HeapExplorer::SetDataOrAccessorPropertyReference(
PropertyKind kind, JSObject* parent_obj, int parent_entry,
Name* reference_name, Object* child_obj, const char* name_format_string,
int field_offset) {
- if (kind == ACCESSOR) {
+ if (kind == kAccessor) {
ExtractAccessorPairProperty(parent_obj, parent_entry, reference_name,
child_obj, field_offset);
} else {
@@ -2557,13 +2551,17 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
- debug_heap->Verify();
+ if (FLAG_verify_heap) {
+ debug_heap->Verify();
+ }
#endif
SetProgressTotal(2); // 2 passes.
#ifdef VERIFY_HEAP
- debug_heap->Verify();
+ if (FLAG_verify_heap) {
+ debug_heap->Verify();
+ }
#endif
snapshot_->AddSyntheticRootEntries();
@@ -3104,7 +3102,7 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
WriteUChar(writer_, *s);
} else {
// Convert UTF-8 into \u UTF-16 literal.
- unsigned length = 1, cursor = 0;
+ size_t length = 1, cursor = 0;
for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
if (c != unibrow::Utf8::kBadChar) {
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index a35872dc47..6f550aabff 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -31,8 +31,8 @@ GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
}
-GCTracer::SurvivalEvent::SurvivalEvent(double survival_rate) {
- survival_rate_ = survival_rate;
+GCTracer::SurvivalEvent::SurvivalEvent(double promotion_ratio) {
+ promotion_ratio_ = promotion_ratio;
}
@@ -257,8 +257,8 @@ void GCTracer::AddContextDisposalTime(double time) {
}
-void GCTracer::AddSurvivalRate(double survival_rate) {
- survival_events_.push_front(SurvivalEvent(survival_rate));
+void GCTracer::AddSurvivalRatio(double promotion_ratio) {
+ survival_events_.push_front(SurvivalEvent(promotion_ratio));
}
@@ -350,6 +350,8 @@ void GCTracer::PrintNVP() const {
PrintF("misc_compaction=%.1f ",
current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
PrintF("weak_closure=%.1f ", current_.scopes[Scope::MC_WEAKCLOSURE]);
+ PrintF("inc_weak_closure=%.1f ",
+ current_.scopes[Scope::MC_INCREMENTAL_WEAKCLOSURE]);
PrintF("weakcollection_process=%.1f ",
current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
PrintF("weakcollection_clear=%.1f ",
@@ -372,9 +374,9 @@ void GCTracer::PrintNVP() const {
PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
PrintF("promotion_ratio=%.1f%% ", heap_->promotion_ratio_);
+ PrintF("average_survival_ratio=%.1f%% ", AverageSurvivalRatio());
PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
- PrintF("average_survival_rate%.1f%% ", AverageSurvivalRate());
PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
NewSpaceAllocationThroughputInBytesPerMillisecond());
PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
@@ -570,13 +572,13 @@ double GCTracer::ContextDisposalRateInMilliseconds() const {
}
-double GCTracer::AverageSurvivalRate() const {
+double GCTracer::AverageSurvivalRatio() const {
if (survival_events_.size() == 0) return 0.0;
double sum_of_rates = 0.0;
SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
while (iter != survival_events_.end()) {
- sum_of_rates += iter->survival_rate_;
+ sum_of_rates += iter->promotion_ratio_;
++iter;
}
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 528eb52c64..ca144b24b4 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -108,6 +108,7 @@ class GCTracer {
MC_UPDATE_POINTERS_TO_EVACUATED,
MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
MC_UPDATE_MISC_POINTERS,
+ MC_INCREMENTAL_WEAKCLOSURE,
MC_WEAKCLOSURE,
MC_WEAKCOLLECTION_PROCESS,
MC_WEAKCOLLECTION_CLEAR,
@@ -169,9 +170,9 @@ class GCTracer {
// Default constructor leaves the event uninitialized.
SurvivalEvent() {}
- explicit SurvivalEvent(double survival_rate);
+ explicit SurvivalEvent(double survival_ratio);
- double survival_rate_;
+ double promotion_ratio_;
};
@@ -299,7 +300,7 @@ class GCTracer {
void AddContextDisposalTime(double time);
- void AddSurvivalRate(double survival_rate);
+ void AddSurvivalRatio(double survival_ratio);
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
@@ -387,10 +388,10 @@ class GCTracer {
// Returns 0 if no events have been recorded.
double ContextDisposalRateInMilliseconds() const;
- // Computes the average survival rate based on the last recorded survival
+ // Computes the average survival ratio based on the last recorded survival
// events.
// Returns 0 if no events have been recorded.
- double AverageSurvivalRate() const;
+ double AverageSurvivalRatio() const;
// Returns true if at least one survival event was recorded.
bool SurvivalEventsRecorded() const;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 549ecbc9a6..e81829c38d 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -27,13 +27,6 @@ void PromotionQueue::insert(HeapObject* target, int size) {
return;
}
- if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
- NewSpacePage* rear_page =
- NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
- DCHECK(!rear_page->prev_page()->is_anchor());
- rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
- }
-
if ((rear_ - 2) < limit_) {
RelocateQueueHead();
emergency_stack_->Add(Entry(target, size));
@@ -729,20 +722,6 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
}
-#ifdef VERIFY_HEAP
-NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
- Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_object_verification_scope_depth_++;
-}
-
-
-NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
- Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_object_verification_scope_depth_--;
-}
-#endif
-
-
GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
heap_->gc_callbacks_depth_++;
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 8dc77b7acc..e1817ba8b4 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -34,6 +34,10 @@
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
+#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h" // NOLINT
+#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
+#endif
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
@@ -63,7 +67,8 @@ Heap::Heap()
initial_semispace_size_(Page::kPageSize),
target_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
- initial_old_generation_size_(max_old_generation_size_ / 2),
+ initial_old_generation_size_(max_old_generation_size_ /
+ kInitalOldGenerationLimitFactor),
old_generation_size_configured_(false),
max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
@@ -77,7 +82,6 @@ Heap::Heap()
always_allocate_scope_depth_(0),
contexts_disposed_(0),
global_ic_age_(0),
- flush_monomorphic_ics_(false),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -132,9 +136,6 @@ Heap::Heap()
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
gcs_since_last_deopt_(0),
-#ifdef VERIFY_HEAP
- no_weak_object_verification_scope_depth_(0),
-#endif
allocation_sites_scratchpad_length_(0),
promotion_queue_(this),
configured_(false),
@@ -491,11 +492,11 @@ void Heap::ClearAllICsByKind(Code::Kind kind) {
}
-void Heap::RepairFreeListsAfterBoot() {
+void Heap::RepairFreeListsAfterDeserialization() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
- space->RepairFreeListsAfterBoot();
+ space->RepairFreeListsAfterDeserialization();
}
}
@@ -731,6 +732,49 @@ void Heap::GarbageCollectionEpilogue() {
}
+void Heap::HandleGCRequest() {
+ if (incremental_marking()->request_type() ==
+ IncrementalMarking::COMPLETE_MARKING) {
+ CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
+ return;
+ }
+ DCHECK(FLAG_overapproximate_weak_closure);
+ DCHECK(!incremental_marking()->weak_closure_was_overapproximated());
+ OverApproximateWeakClosure("GC interrupt");
+}
+
+
+void Heap::OverApproximateWeakClosure(const char* gc_reason) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
+ gc_reason);
+ }
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
+ }
+ }
+ mark_compact_collector()->OverApproximateWeakClosure();
+ incremental_marking()->set_should_hurry(false);
+ incremental_marking()->set_weak_closure_was_overapproximated(true);
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
+ }
+ }
+}
+
+
void Heap::CollectAllGarbage(int flags, const char* gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
@@ -823,7 +867,8 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
+ if (!incremental_marking()->IsComplete() &&
+ !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
}
@@ -849,6 +894,9 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
}
GarbageCollectionEpilogue();
+ if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
+ isolate()->CheckDetachedContextsAfterGC();
+ }
tracer()->Stop(collector);
}
@@ -872,7 +920,6 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
- flush_monomorphic_ics_ = true;
AgeInlineCaches();
tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
return ++contexts_disposed_;
@@ -922,15 +969,6 @@ static void VerifyStringTable(Heap* heap) {
#endif // VERIFY_HEAP
-static bool AbortIncrementalMarkingAndCollectGarbage(
- Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
- heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
- bool result = heap->CollectGarbage(space, gc_reason);
- heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
- return result;
-}
-
-
bool Heap::ReserveSpace(Reservation* reservations) {
bool gc_performed = true;
int counter = 0;
@@ -956,14 +994,15 @@ bool Heap::ReserveSpace(Reservation* reservations) {
} else {
allocation = paged_space(space)->AllocateRaw(size);
}
- FreeListNode* node;
- if (allocation.To(&node)) {
+ HeapObject* free_space;
+ if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
- node->set_size(this, size);
+ Address free_space_address = free_space->address();
+ CreateFillerObjectAt(free_space_address, size);
DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
- chunk.start = node->address();
- chunk.end = node->address() + size;
+ chunk.start = free_space_address;
+ chunk.end = free_space_address + size;
} else {
perform_gc = true;
break;
@@ -972,12 +1011,18 @@ bool Heap::ReserveSpace(Reservation* reservations) {
}
if (perform_gc) {
if (space == NEW_SPACE) {
- Heap::CollectGarbage(NEW_SPACE,
- "failed to reserve space in the new space");
+ CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
} else {
- AbortIncrementalMarkingAndCollectGarbage(
- this, static_cast<AllocationSpace>(space),
- "failed to reserve space in paged or large object space");
+ if (counter > 1) {
+ CollectAllGarbage(
+ kReduceMemoryFootprintMask,
+ "failed to reserve space in paged or large "
+ "object space, trying to reduce memory footprint");
+ } else {
+ CollectAllGarbage(
+ kAbortIncrementalMarkingMask,
+ "failed to reserve space in paged or large object space");
+ }
}
gc_performed = true;
break; // Abort for-loop over spaces and retry.
@@ -1060,8 +1105,7 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
static_cast<double>(start_new_space_size) * 100);
double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
- tracer()->AddSurvivalRate(survival_rate);
-
+ tracer()->AddSurvivalRatio(survival_rate);
if (survival_rate > kYoungSurvivalRateHighThreshold) {
high_survival_rate_period_length_++;
} else {
@@ -1242,8 +1286,6 @@ void Heap::MarkCompactEpilogue() {
isolate_->counters()->objs_since_last_full()->Set(0);
- flush_monomorphic_ics_ = false;
-
incremental_marking()->Epilogue();
}
@@ -1412,14 +1454,14 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
void PromotionQueue::Initialize() {
- // Assumes that a NewSpacePage exactly fits a number of promotion queue
- // entries (where each is a pair of intptr_t). This allows us to simplify
- // the test fpr when to switch pages.
+ // The last to-space page may be used for promotion queue. On promotion
+ // conflict, we use the emergency stack.
DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
0);
- limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
front_ = rear_ =
reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+ limit_ = reinterpret_cast<intptr_t*>(
+ Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
emergency_stack_ = NULL;
}
@@ -1594,7 +1636,12 @@ void Heap::Scavenge() {
incremental_marking()->UpdateMarkingDequeAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
- ProcessWeakReferences(&weak_object_retainer);
+ ProcessYoungWeakReferences(&weak_object_retainer);
+
+ // Collects callback info for handles referenced by young generation that are
+ // pending (about to be collected) and either phantom or internal-fields.
+ // Releases the global handles. See also PostGarbageCollectionProcessing.
+ isolate()->global_handles()->CollectYoungPhantomCallbackData();
DCHECK(new_space_front == new_space_.top());
@@ -1681,16 +1728,16 @@ void Heap::UpdateReferencesInExternalStringTable(
}
-void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
+void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
ProcessArrayBuffers(retainer);
ProcessNativeContexts(retainer);
- // TODO(mvstanton): AllocationSites only need to be processed during
- // MARK_COMPACT, as they live in old space. Verify and address.
ProcessAllocationSites(retainer);
- // Collects callback info for handles that are pending (about to be
- // collected) and either phantom or internal-fields. Releases the global
- // handles. See also PostGarbageCollectionProcessing.
- isolate()->global_handles()->CollectPhantomCallbackData();
+}
+
+
+void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
+ ProcessArrayBuffers(retainer);
+ ProcessNativeContexts(retainer);
}
@@ -2370,8 +2417,8 @@ void Heap::ConfigureInitialOldGenerationSize() {
old_generation_allocation_limit_ =
Max(kMinimumOldGenerationAllocationLimit,
static_cast<intptr_t>(
- static_cast<double>(initial_old_generation_size_) *
- (tracer()->AverageSurvivalRate() / 100)));
+ static_cast<double>(old_generation_allocation_limit_) *
+ (tracer()->AverageSurvivalRatio() / 100)));
}
}
@@ -2666,12 +2713,6 @@ bool Heap::CreateInitialMaps() {
set_native_source_string_map(Map::cast(obj));
}
- ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
- undetectable_string_map()->set_is_undetectable();
-
- ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
- undetectable_one_byte_string_map()->set_is_undetectable();
-
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
@@ -2910,8 +2951,8 @@ void Heap::CreateInitialObjects() {
set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
- set_nan_value(
- *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
+ set_nan_value(*factory->NewHeapNumber(
+ std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
// The hole has not been created yet, but we want to put something
@@ -3050,6 +3091,25 @@ void Heap::CreateInitialObjects() {
// Number of queued microtasks stored in Isolate::pending_microtask_count().
set_microtask_queue(empty_fixed_array());
+ if (FLAG_vector_ics) {
+ FeedbackVectorSpec spec(0, 1);
+ spec.SetKind(0, Code::KEYED_LOAD_IC);
+ Handle<TypeFeedbackVector> dummy_vector =
+ factory->NewTypeFeedbackVector(spec);
+ dummy_vector->Set(FeedbackVectorICSlot(0),
+ *TypeFeedbackVector::MegamorphicSentinel(isolate()),
+ SKIP_WRITE_BARRIER);
+ set_keyed_load_dummy_vector(*dummy_vector);
+ } else {
+ set_keyed_load_dummy_vector(empty_fixed_array());
+ }
+
+ set_detached_contexts(empty_fixed_array());
+
+ set_weak_object_to_code_table(
+ *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
+ TENURED));
+
Handle<SeededNumberDictionary> slow_element_dictionary =
SeededNumberDictionary::New(isolate(), 0, TENURED);
slow_element_dictionary->set_requires_slow_elements();
@@ -3390,13 +3450,18 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
+ // At this point, we may be deserializing the heap from a snapshot, and
+ // none of the maps have been created yet and are NULL.
if (size == kPointerSize) {
- filler->set_map_no_write_barrier(one_pointer_filler_map());
+ filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
+ DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
- filler->set_map_no_write_barrier(two_pointer_filler_map());
+ filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
+ DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
} else {
- filler->set_map_no_write_barrier(free_space_map());
- FreeSpace::cast(filler)->set_size(size);
+ filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
+ DCHECK(filler->map() == NULL || filler->map() == free_space_map());
+ FreeSpace::cast(filler)->nobarrier_set_size(size);
}
}
@@ -3880,9 +3945,33 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
}
Address clone_address = clone->address();
CopyBlock(clone_address, source->address(), object_size);
- // Update write barrier for all fields that lie beyond the header.
- RecordWrites(clone_address, JSObject::kHeaderSize,
- (object_size - JSObject::kHeaderSize) / kPointerSize);
+
+ // Update write barrier for all tagged fields that lie beyond the header.
+ const int start_offset = JSObject::kHeaderSize;
+ const int end_offset = object_size;
+
+#if V8_DOUBLE_FIELDS_UNBOXING
+ LayoutDescriptorHelper helper(map);
+ bool has_only_tagged_fields = helper.all_fields_tagged();
+
+ if (!has_only_tagged_fields) {
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ RecordWrites(clone_address, offset,
+ (end_of_region_offset - offset) / kPointerSize);
+ }
+ offset = end_of_region_offset;
+ }
+ } else {
+#endif
+ // Object has only tagged fields.
+ RecordWrites(clone_address, start_offset,
+ (end_offset - start_offset) / kPointerSize);
+#if V8_DOUBLE_FIELDS_UNBOXING
+ }
+#endif
+
} else {
wb_mode = SKIP_WRITE_BARRIER;
@@ -3949,9 +4038,9 @@ static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
int len) {
const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
- unsigned stream_length = vector.length();
+ size_t stream_length = vector.length();
while (stream_length != 0) {
- unsigned consumed = 0;
+ size_t consumed = 0;
uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
DCHECK(c != unibrow::Utf8::kBadChar);
DCHECK(consumed <= stream_length);
@@ -4488,6 +4577,7 @@ bool Heap::IdleNotification(int idle_time_in_ms) {
bool Heap::IdleNotification(double deadline_in_seconds) {
+ CHECK(HasBeenSetUp()); // http://crbug.com/425035
double deadline_in_ms =
deadline_in_seconds *
static_cast<double>(base::Time::kMillisecondsPerSecond);
@@ -5192,7 +5282,8 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
if (FLAG_initial_old_space_size > 0) {
initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
} else {
- initial_old_generation_size_ = max_old_generation_size_ / 2;
+ initial_old_generation_size_ =
+ max_old_generation_size_ / kInitalOldGenerationLimitFactor;
}
old_generation_allocation_limit_ = initial_old_generation_size_;
@@ -5446,12 +5537,11 @@ bool Heap::CreateHeapObjects() {
// Create initial objects
CreateInitialObjects();
- CHECK_EQ(0, gc_count_);
+ CHECK_EQ(0u, gc_count_);
set_native_contexts_list(undefined_value());
set_array_buffers_list(undefined_value());
set_allocation_sites_list(undefined_value());
- weak_object_to_code_table_ = undefined_value();
return true;
}
@@ -5471,7 +5561,17 @@ void Heap::SetStackLimits() {
}
-void Heap::NotifyDeserializationComplete() { deserialization_complete_ = true; }
+void Heap::NotifyDeserializationComplete() {
+ deserialization_complete_ = true;
+#ifdef DEBUG
+ // All pages right after bootstrapping must be marked as never-evacuate.
+ PagedSpaces spaces(this);
+ for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+ PageIterator it(s);
+ while (it.has_next()) CHECK(it.next()->NeverEvacuate());
+ }
+#endif // DEBUG
+}
void Heap::TearDown() {
@@ -5626,41 +5726,25 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
// TODO(ishell): Find a better place for this.
-void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
+void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
Handle<DependentCode> dep) {
DCHECK(!InNewSpace(*obj));
DCHECK(!InNewSpace(*dep));
- // This handle scope keeps the table handle local to this function, which
- // allows us to safely skip write barriers in table update operations.
- HandleScope scope(isolate());
- Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
- isolate());
+ Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
table = WeakHashTable::Put(table, obj, dep);
-
- if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
- WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
- }
- set_weak_object_to_code_table(*table);
- DCHECK_EQ(*dep, table->Lookup(obj));
+ if (*table != weak_object_to_code_table())
+ set_weak_object_to_code_table(*table);
+ DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
}
-DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
- Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
+ Object* dep = weak_object_to_code_table()->Lookup(obj);
if (dep->IsDependentCode()) return DependentCode::cast(dep);
return DependentCode::cast(empty_fixed_array());
}
-void Heap::EnsureWeakObjectToCodeTable() {
- if (!weak_object_to_code_table()->IsHashTable()) {
- set_weak_object_to_code_table(
- *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
- TENURED));
- }
-}
-
-
void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index e6ccf2eaec..0b353b70f1 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -102,8 +102,6 @@ namespace internal {
V(Map, short_external_one_byte_internalized_string_map, \
ShortExternalOneByteInternalizedStringMap) \
V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
- V(Map, undetectable_string_map, UndetectableStringMap) \
- V(Map, undetectable_one_byte_string_map, UndetectableOneByteStringMap) \
V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
@@ -182,7 +180,10 @@ namespace internal {
EmptySlowElementDictionary) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
- V(FixedArray, microtask_queue, MicrotaskQueue)
+ V(FixedArray, microtask_queue, MicrotaskQueue) \
+ V(FixedArray, keyed_load_dummy_vector, KeyedLoadDummyVector) \
+ V(FixedArray, detached_contexts, DetachedContexts) \
+ V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -199,83 +200,85 @@ namespace internal {
SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
-#define INTERNALIZED_STRING_LIST(V) \
- V(Object_string, "Object") \
- V(proto_string, "__proto__") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(caller_string, "caller") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(callee_string, "callee") \
- V(constructor_string, "constructor") \
- V(dot_result_string, ".result") \
- V(eval_string, "eval") \
- V(empty_string, "") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(length_string, "length") \
- V(name_string, "name") \
- V(null_string, "null") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(nan_string, "NaN") \
- V(source_string, "source") \
- V(source_url_string, "source_url") \
- V(source_mapping_url_string, "source_mapping_url") \
- V(global_string, "global") \
- V(ignore_case_string, "ignoreCase") \
- V(multiline_string, "multiline") \
- V(sticky_string, "sticky") \
- V(harmony_regexps_string, "harmony_regexps") \
- V(input_string, "input") \
- V(index_string, "index") \
- V(last_index_string, "lastIndex") \
- V(object_string, "object") \
- V(prototype_string, "prototype") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(Map_string, "Map") \
- V(Set_string, "Set") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(for_string, "for") \
- V(for_api_string, "for_api") \
- V(for_intern_string, "for_intern") \
- V(private_api_string, "private_api") \
- V(private_intern_string, "private_intern") \
- V(Date_string, "Date") \
- V(char_at_string, "CharAt") \
- V(undefined_string, "undefined") \
- V(value_of_string, "valueOf") \
- V(stack_string, "stack") \
- V(toJSON_string, "toJSON") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(stack_overflow_string, "kStackOverflowBoilerplate") \
- V(illegal_access_string, "illegal access") \
- V(cell_value_string, "%cell_value") \
- V(illegal_argument_string, "illegal argument") \
- V(identity_hash_string, "v8::IdentityHash") \
- V(closure_string, "(closure)") \
- V(dot_string, ".") \
- V(compare_ic_string, "==") \
- V(strict_compare_ic_string, "===") \
- V(infinity_string, "Infinity") \
- V(minus_infinity_string, "-Infinity") \
- V(query_colon_string, "(?:)") \
- V(Generator_string, "Generator") \
- V(throw_string, "throw") \
- V(done_string, "done") \
- V(value_string, "value") \
- V(next_string, "next") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(minus_zero_string, "-0") \
- V(Array_string, "Array") \
- V(Error_string, "Error") \
+#define INTERNALIZED_STRING_LIST(V) \
+ V(Object_string, "Object") \
+ V(proto_string, "__proto__") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(caller_string, "caller") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(callee_string, "callee") \
+ V(constructor_string, "constructor") \
+ V(dot_result_string, ".result") \
+ V(eval_string, "eval") \
+ V(empty_string, "") \
+ V(function_string, "function") \
+ V(Function_string, "Function") \
+ V(length_string, "length") \
+ V(name_string, "name") \
+ V(null_string, "null") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(nan_string, "NaN") \
+ V(source_string, "source") \
+ V(source_url_string, "source_url") \
+ V(source_mapping_url_string, "source_mapping_url") \
+ V(global_string, "global") \
+ V(ignore_case_string, "ignoreCase") \
+ V(multiline_string, "multiline") \
+ V(sticky_string, "sticky") \
+ V(unicode_string, "unicode") \
+ V(harmony_regexps_string, "harmony_regexps") \
+ V(harmony_unicode_regexps_string, "harmony_unicode_regexps") \
+ V(input_string, "input") \
+ V(index_string, "index") \
+ V(last_index_string, "lastIndex") \
+ V(object_string, "object") \
+ V(prototype_string, "prototype") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(symbol_string, "symbol") \
+ V(Symbol_string, "Symbol") \
+ V(Map_string, "Map") \
+ V(Set_string, "Set") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
+ V(for_string, "for") \
+ V(for_api_string, "for_api") \
+ V(for_intern_string, "for_intern") \
+ V(private_api_string, "private_api") \
+ V(private_intern_string, "private_intern") \
+ V(Date_string, "Date") \
+ V(char_at_string, "CharAt") \
+ V(undefined_string, "undefined") \
+ V(value_of_string, "valueOf") \
+ V(stack_string, "stack") \
+ V(toJSON_string, "toJSON") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(stack_overflow_string, "kStackOverflowBoilerplate") \
+ V(illegal_access_string, "illegal access") \
+ V(cell_value_string, "%cell_value") \
+ V(illegal_argument_string, "illegal argument") \
+ V(identity_hash_string, "v8::IdentityHash") \
+ V(closure_string, "(closure)") \
+ V(dot_string, ".") \
+ V(compare_ic_string, "==") \
+ V(strict_compare_ic_string, "===") \
+ V(infinity_string, "Infinity") \
+ V(minus_infinity_string, "-Infinity") \
+ V(query_colon_string, "(?:)") \
+ V(Generator_string, "Generator") \
+ V(throw_string, "throw") \
+ V(done_string, "done") \
+ V(value_string, "value") \
+ V(next_string, "next") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(minus_zero_string, "-0") \
+ V(Array_string, "Array") \
+ V(Error_string, "Error") \
V(RegExp_string, "RegExp")
#define PRIVATE_SYMBOL_LIST(V) \
@@ -289,7 +292,6 @@ namespace internal {
V(uninitialized_symbol) \
V(megamorphic_symbol) \
V(premonomorphic_symbol) \
- V(generic_symbol) \
V(stack_trace_symbol) \
V(detailed_stack_trace_symbol) \
V(normal_ic_symbol) \
@@ -300,7 +302,10 @@ namespace internal {
V(promise_has_handler_symbol) \
V(class_script_symbol) \
V(class_start_position_symbol) \
- V(class_end_position_symbol)
+ V(class_end_position_symbol) \
+ V(error_start_pos_symbol) \
+ V(error_end_pos_symbol) \
+ V(error_script_symbol)
#define PUBLIC_SYMBOL_LIST(V) \
V(has_instance_symbol, symbolHasInstance, Symbol.hasInstance) \
@@ -396,6 +401,9 @@ class StoreBufferRebuilder {
// A queue of objects promoted during scavenge. Each object is accompanied
// by it's size to avoid dereferencing a map pointer for scanning.
+// The last page in to-space is used for the promotion queue. On conflict
+// during scavenge, the promotion queue is allocated externally and all
+// entries are copied to the external queue.
class PromotionQueue {
public:
explicit PromotionQueue(Heap* heap)
@@ -418,6 +426,12 @@ class PromotionQueue {
}
void SetNewLimit(Address limit) {
+ // If we are already using an emergency stack, we can ignore it.
+ if (emergency_stack_) return;
+
+ // If the limit is not on the same page, we can ignore it.
+ if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+
limit_ = reinterpret_cast<intptr_t*>(limit);
if (limit_ <= rear_) {
@@ -428,6 +442,10 @@ class PromotionQueue {
}
bool IsBelowPromotionQueue(Address to_space_top) {
+ // If an emergency stack is used, the to-space address cannot interfere
+ // with the promotion queue.
+ if (emergency_stack_) return true;
+
// If the given to-space top pointer and the head of the promotion queue
// are not on the same page, then the to-space objects are below the
// promotion queue.
@@ -455,12 +473,6 @@ class PromotionQueue {
return;
}
- if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
- NewSpacePage* front_page =
- NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
- DCHECK(!front_page->prev_page()->is_anchor());
- front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
- }
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
// Assert no underflow.
@@ -690,8 +702,8 @@ class Heap {
// Iterates the whole code space to clear all ICs of the given kind.
void ClearAllICsByKind(Code::Kind kind);
- // For use during bootup.
- void RepairFreeListsAfterBoot();
+ // FreeSpace objects have a null map after deserialization. Update the map.
+ void RepairFreeListsAfterDeserialization();
template <typename T>
static inline bool IsOneByte(T t, int chars);
@@ -753,6 +765,15 @@ class Heap {
// Making the heap iterable requires us to abort incremental marking.
static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
+ // Invoked when GC was requested via the stack guard.
+ void HandleGCRequest();
+
+ // Attempt to over-approximate the weak closure by marking object groups and
+ // implicit references from global handles, but don't atomically complete
+ // marking. If we continue to mark incrementally, we might have marked
+ // objects that die later.
+ void OverApproximateWeakClosure(const char* gc_reason);
+
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
@@ -846,8 +867,6 @@ class Heap {
// Used in CreateAllocationSiteStub and the (de)serializer.
Object** allocation_sites_list_address() { return &allocation_sites_list_; }
- Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
-
void set_encountered_weak_collections(Object* weak_collection) {
encountered_weak_collections_ = weak_collection;
}
@@ -952,11 +971,6 @@ class Heap {
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
-
-
- bool weak_embedded_objects_verification_enabled() {
- return no_weak_object_verification_scope_depth_ == 0;
- }
#endif
#ifdef DEBUG
@@ -1070,6 +1084,8 @@ class Heap {
static const intptr_t kMinimumOldGenerationAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+ static const int kInitalOldGenerationLimitFactor = 2;
+
static const int kPointerMultiplier = i::kPointerSize / 4;
// The new space size has to be a power of 2. Sizes are in MB.
@@ -1222,7 +1238,8 @@ class Heap {
void UpdateReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
- void ProcessWeakReferences(WeakObjectRetainer* retainer);
+ void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
+ void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
@@ -1344,8 +1361,6 @@ class Heap {
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
- bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
-
int64_t amount_of_external_allocated_memory() {
return amount_of_external_allocated_memory_;
}
@@ -1413,16 +1428,10 @@ class Heap {
Heap* heap_;
};
- void AddWeakObjectToCodeDependency(Handle<Object> obj,
+ void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
Handle<DependentCode> dep);
- DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);
-
- void InitializeWeakObjectToCodeTable() {
- set_weak_object_to_code_table(undefined_value());
- }
-
- void EnsureWeakObjectToCodeTable();
+ DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
static void FatalProcessOutOfMemory(const char* location,
bool take_snapshot = false);
@@ -1484,6 +1493,10 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+ static const int kInitialStringTableSize = 2048;
+ static const int kInitialEvalCacheSize = 64;
+ static const int kInitialNumberStringCacheSize = 256;
+
private:
Heap();
@@ -1528,8 +1541,6 @@ class Heap {
int global_ic_age_;
- bool flush_monomorphic_ics_;
-
int scan_on_scavenge_pages_;
NewSpace new_space_;
@@ -1607,11 +1618,6 @@ class Heap {
Object* array_buffers_list_;
Object* allocation_sites_list_;
- // WeakHashTable that maps objects embedded in optimized code to dependent
- // code list. It is initilized lazily and contains the undefined_value at
- // start.
- Object* weak_object_to_code_table_;
-
// List of encountered weak collections (JSWeakMap and JSWeakSet) during
// marking. It is initialized during marking, destroyed after marking and
// contains Smi(0) while marking is not active.
@@ -2028,23 +2034,10 @@ class Heap {
void ClearObjectStats(bool clear_last_time_stats = false);
- void set_weak_object_to_code_table(Object* value) {
- DCHECK(!InNewSpace(value));
- weak_object_to_code_table_ = value;
- }
-
- Object** weak_object_to_code_table_address() {
- return &weak_object_to_code_table_;
- }
-
inline void UpdateAllocationsHash(HeapObject* object);
inline void UpdateAllocationsHash(uint32_t value);
inline void PrintAlloctionsHash();
- static const int kInitialStringTableSize = 2048;
- static const int kInitialEvalCacheSize = 64;
- static const int kInitialNumberStringCacheSize = 256;
-
// Object counts and used memory by InstanceType
size_t object_counts_[OBJECT_STATS_COUNT];
size_t object_counts_last_time_[OBJECT_STATS_COUNT];
@@ -2092,10 +2085,6 @@ class Heap {
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
-#ifdef VERIFY_HEAP
- int no_weak_object_verification_scope_depth_;
-#endif
-
static const int kAllocationSiteScratchpadSize = 256;
int allocation_sites_scratchpad_length_;
@@ -2131,9 +2120,6 @@ class Heap {
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
-#ifdef VERIFY_HEAP
- friend class NoWeakObjectVerificationScope;
-#endif
friend class Page;
DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -2187,15 +2173,6 @@ class AlwaysAllocateScope {
};
-#ifdef VERIFY_HEAP
-class NoWeakObjectVerificationScope {
- public:
- inline NoWeakObjectVerificationScope();
- inline ~NoWeakObjectVerificationScope();
-};
-#endif
-
-
class GCCallbacksScope {
public:
explicit inline GCCallbacksScope(Heap* heap);
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index aadd17c94b..0ac8e56860 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -28,7 +28,9 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
idle_marking_delay_counter_(0),
no_marking_scope_depth_(0),
unscanned_bytes_of_large_object_(0),
- was_activated_(false) {}
+ was_activated_(false),
+ weak_closure_was_overapproximated_(false),
+ request_type_(COMPLETE_MARKING) {}
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
@@ -550,8 +552,6 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
- heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
-
// Ready to start incremental marking.
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Running\n");
@@ -655,10 +655,7 @@ intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
int size = obj->SizeFromMap(map);
unscanned_bytes_of_large_object_ = 0;
VisitObject(map, obj, size);
- int delta = (size - unscanned_bytes_of_large_object_);
- // TODO(jochen): remove after http://crbug.com/381820 is resolved.
- CHECK_LT(0, delta);
- bytes_processed += delta;
+ bytes_processed += size - unscanned_bytes_of_large_object_;
}
return bytes_processed;
}
@@ -774,6 +771,18 @@ void IncrementalMarking::Finalize() {
}
+void IncrementalMarking::OverApproximateWeakClosure() {
+ DCHECK(FLAG_overapproximate_weak_closure);
+ DCHECK(!weak_closure_was_overapproximated_);
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
+ }
+ set_should_hurry(true);
+ request_type_ = OVERAPPROXIMATION;
+ heap_->isolate()->stack_guard()->RequestGC();
+}
+
+
void IncrementalMarking::MarkingComplete(CompletionAction action) {
state_ = COMPLETE;
// We will set the stack guard to request a GC now. This will mean the rest
@@ -786,12 +795,16 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
PrintF("[IncrementalMarking] Complete (normal).\n");
}
if (action == GC_VIA_STACK_GUARD) {
+ request_type_ = COMPLETE_MARKING;
heap_->isolate()->stack_guard()->RequestGC();
}
}
-void IncrementalMarking::Epilogue() { was_activated_ = false; }
+void IncrementalMarking::Epilogue() {
+ was_activated_ = false;
+ weak_closure_was_overapproximated_ = false;
+}
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
@@ -934,7 +947,13 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
- MarkingComplete(action);
+ if (FLAG_overapproximate_weak_closure &&
+ !weak_closure_was_overapproximated_ &&
+ action == GC_VIA_STACK_GUARD) {
+ OverApproximateWeakClosure();
+ } else {
+ MarkingComplete(action);
+ }
} else {
IncrementIdleMarkingDelayCounter();
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 56c5a24c2c..d6dfe17c7f 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -24,6 +24,8 @@ class IncrementalMarking {
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
+ enum GCRequestType { COMPLETE_MARKING, OVERAPPROXIMATION };
+
explicit IncrementalMarking(Heap* heap);
static void Initialize();
@@ -36,6 +38,13 @@ class IncrementalMarking {
bool should_hurry() { return should_hurry_; }
void set_should_hurry(bool val) { should_hurry_ = val; }
+ bool weak_closure_was_overapproximated() const {
+ return weak_closure_was_overapproximated_;
+ }
+ void set_weak_closure_was_overapproximated(bool val) {
+ weak_closure_was_overapproximated_ = val;
+ }
+
inline bool IsStopped() { return state() == STOPPED; }
INLINE(bool IsMarking()) { return state() >= MARKING; }
@@ -44,6 +53,8 @@ class IncrementalMarking {
inline bool IsComplete() { return state() == COMPLETE; }
+ GCRequestType request_type() const { return request_type_; }
+
bool WorthActivating();
bool ShouldActivate();
@@ -66,6 +77,8 @@ class IncrementalMarking {
void Abort();
+ void OverApproximateWeakClosure();
+
void MarkingComplete(CompletionAction action);
void Epilogue();
@@ -228,6 +241,10 @@ class IncrementalMarking {
bool was_activated_;
+ bool weak_closure_was_overapproximated_;
+
+ GCRequestType request_type_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index c9a310a3f7..b525bf6ac2 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -242,6 +242,7 @@ void MarkCompactCollector::TearDown() {
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+ DCHECK(!p->NeverEvacuate());
p->MarkEvacuationCandidate();
evacuation_candidates_.Add(p);
}
@@ -301,16 +302,17 @@ void MarkCompactCollector::CollectGarbage() {
MarkLiveObjects();
DCHECK(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveReferences();
-
+ // ClearNonLiveReferences can deoptimize code in dependent code arrays.
+ // Process weak cells before so that weak cells in dependent code
+ // arrays are cleared or contain only live code objects.
ProcessAndClearWeakCells();
+ if (FLAG_collect_maps) ClearNonLiveReferences();
+
ClearWeakCollections();
heap_->set_encountered_weak_cells(Smi::FromInt(0));
- isolate()->global_handles()->CollectPhantomCallbackData();
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarking(heap_);
@@ -320,9 +322,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepSpaces();
#ifdef VERIFY_HEAP
- if (heap()->weak_embedded_objects_verification_enabled()) {
- VerifyWeakEmbeddedObjectsInCode();
- }
+ VerifyWeakEmbeddedObjectsInCode();
if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
}
@@ -720,10 +720,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
Candidate* least = NULL;
PageIterator it(space);
- if (it.has_next()) it.next(); // Never compact the first page.
-
while (it.has_next()) {
Page* p = it.next();
+ if (p->NeverEvacuate()) continue;
p->ClearEvacuationCandidate();
if (FLAG_stress_compaction) {
@@ -1959,8 +1958,6 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Handle the string table specially.
MarkStringTable(visitor);
- MarkWeakObjectToCodeTable();
-
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
RefillMarkingDeque();
@@ -2001,16 +1998,6 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
}
-void MarkCompactCollector::MarkWeakObjectToCodeTable() {
- HeapObject* weak_object_to_code_table =
- HeapObject::cast(heap()->weak_object_to_code_table());
- if (!IsMarked(weak_object_to_code_table)) {
- MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
- SetMark(weak_object_to_code_table, mark);
- }
-}
-
-
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
@@ -2130,11 +2117,12 @@ void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
marking_deque_memory_ = new base::VirtualMemory(4 * MB);
}
if (!marking_deque_memory_committed_) {
- bool success = marking_deque_memory_->Commit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size(),
- false); // Not executable.
- CHECK(success);
+ if (!marking_deque_memory_->Commit(
+ reinterpret_cast<Address>(marking_deque_memory_->address()),
+ marking_deque_memory_->size(),
+ false)) { // Not executable.
+ V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
+ }
marking_deque_memory_committed_ = true;
InitializeMarkingDeque();
}
@@ -2162,6 +2150,21 @@ void MarkCompactCollector::UncommitMarkingDeque() {
}
+void MarkCompactCollector::OverApproximateWeakClosure() {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);
+
+ RootMarkingVisitor root_visitor(heap());
+ isolate()->global_handles()->IterateObjectGroups(
+ &root_visitor, &IsUnmarkedHeapObjectWithHeap);
+ MarkImplicitRefGroups();
+
+ // Remove object groups after marking phase.
+ heap()->isolate()->global_handles()->RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
+}
+
+
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
double start_time = 0.0;
@@ -2276,7 +2279,7 @@ void MarkCompactCollector::AfterMarking() {
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- heap()->ProcessWeakReferences(&mark_compact_object_retainer);
+ heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
// Remove object groups after marking phase.
heap()->isolate()->global_handles()->RemoveObjectGroups();
@@ -2313,67 +2316,29 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
- if (map_mark.Get()) {
- ClearNonLiveDependentCode(map->dependent_code());
- } else {
- ClearDependentCode(map->dependent_code());
+ if (!map_mark.Get()) {
+ have_code_to_deoptimize_ |=
+ map->dependent_code()->MarkCodeForDeoptimization(
+ isolate(), DependentCode::kWeakCodeGroup);
map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
}
- // Iterate over property cell space, removing dependent code that is not
- // otherwise kept alive by strong references.
- HeapObjectIterator cell_iterator(heap_->property_cell_space());
- for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
- cell = cell_iterator.Next()) {
- if (IsMarked(cell)) {
- ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
- }
- }
-
- // Iterate over allocation sites, removing dependent code that is not
- // otherwise kept alive by strong references.
- Object* undefined = heap()->undefined_value();
- for (Object* site = heap()->allocation_sites_list(); site != undefined;
- site = AllocationSite::cast(site)->weak_next()) {
- if (IsMarked(site)) {
- ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
- }
- }
-
- if (heap_->weak_object_to_code_table()->IsHashTable()) {
- WeakHashTable* table =
- WeakHashTable::cast(heap_->weak_object_to_code_table());
- uint32_t capacity = table->Capacity();
- for (uint32_t i = 0; i < capacity; i++) {
- uint32_t key_index = table->EntryToIndex(i);
- Object* key = table->get(key_index);
- if (!table->IsKey(key)) continue;
- uint32_t value_index = table->EntryToValueIndex(i);
- Object* value = table->get(value_index);
- if (key->IsCell() && !IsMarked(key)) {
- Cell* cell = Cell::cast(key);
- Object* object = cell->value();
- if (IsMarked(object)) {
- MarkBit mark = Marking::MarkBitFrom(cell);
- SetMark(cell, mark);
- Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
- RecordSlot(value_slot, value_slot, *value_slot);
- }
- }
- if (IsMarked(key)) {
- if (!IsMarked(value)) {
- HeapObject* obj = HeapObject::cast(value);
- MarkBit mark = Marking::MarkBitFrom(obj);
- SetMark(obj, mark);
- }
- ClearNonLiveDependentCode(DependentCode::cast(value));
- } else {
- ClearDependentCode(DependentCode::cast(value));
- table->set(key_index, heap_->the_hole_value());
- table->set(value_index, heap_->the_hole_value());
- table->ElementRemoved();
- }
+ WeakHashTable* table = heap_->weak_object_to_code_table();
+ uint32_t capacity = table->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key_index = table->EntryToIndex(i);
+ Object* key = table->get(key_index);
+ if (!table->IsKey(key)) continue;
+ uint32_t value_index = table->EntryToValueIndex(i);
+ Object* value = table->get(value_index);
+ if (WeakCell::cast(key)->cleared()) {
+ have_code_to_deoptimize_ |=
+ DependentCode::cast(value)->MarkCodeForDeoptimization(
+ isolate(), DependentCode::kWeakCodeGroup);
+ table->set(key_index, heap_->the_hole_value());
+ table->set(value_index, heap_->the_hole_value());
+ table->ElementRemoved();
}
}
}
@@ -2383,25 +2348,15 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
int number_of_transitions = map->NumberOfProtoTransitions();
FixedArray* prototype_transitions = map->GetPrototypeTransitions();
- int new_number_of_transitions = 0;
const int header = Map::kProtoTransitionHeaderSize;
- const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
- const int map_offset = header + Map::kProtoTransitionMapOffset;
- const int step = Map::kProtoTransitionElementsPerEntry;
+ int new_number_of_transitions = 0;
for (int i = 0; i < number_of_transitions; i++) {
- Object* prototype = prototype_transitions->get(proto_offset + i * step);
- Object* cached_map = prototype_transitions->get(map_offset + i * step);
- if (IsMarked(prototype) && IsMarked(cached_map)) {
- DCHECK(!prototype->IsUndefined());
- int proto_index = proto_offset + new_number_of_transitions * step;
- int map_index = map_offset + new_number_of_transitions * step;
+ Object* cached_map = prototype_transitions->get(header + i);
+ if (IsMarked(cached_map)) {
if (new_number_of_transitions != i) {
- prototype_transitions->set(proto_index, prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
+ prototype_transitions->set(header + new_number_of_transitions,
+ cached_map, SKIP_WRITE_BARRIER);
}
- Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
- RecordSlot(slot, slot, prototype);
new_number_of_transitions++;
}
}
@@ -2411,8 +2366,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
}
// Fill slots that became free with undefined value.
- for (int i = new_number_of_transitions * step;
- i < number_of_transitions * step; i++) {
+ for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
prototype_transitions->set_undefined(header + i);
}
}
@@ -2545,70 +2499,6 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
}
-void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
- DisallowHeapAllocation no_allocation;
- DependentCode::GroupStartIndexes starts(entries);
- int number_of_entries = starts.number_of_entries();
- if (number_of_entries == 0) return;
- int g = DependentCode::kWeakCodeGroup;
- for (int i = starts.at(g); i < starts.at(g + 1); i++) {
- // If the entry is compilation info then the map must be alive,
- // and ClearDependentCode shouldn't be called.
- DCHECK(entries->is_code_at(i));
- Code* code = entries->code_at(i);
- if (IsMarked(code) && !code->marked_for_deoptimization()) {
- DependentCode::SetMarkedForDeoptimization(
- code, static_cast<DependentCode::DependencyGroup>(g));
- code->InvalidateEmbeddedObjects();
- have_code_to_deoptimize_ = true;
- }
- }
- for (int i = 0; i < number_of_entries; i++) {
- entries->clear_at(i);
- }
-}
-
-
-int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
- DependentCode* entries, int group, int start, int end, int new_start) {
- int survived = 0;
- for (int i = start; i < end; i++) {
- Object* obj = entries->object_at(i);
- DCHECK(obj->IsCode() || IsMarked(obj));
- if (IsMarked(obj) &&
- (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
- if (new_start + survived != i) {
- entries->set_object_at(new_start + survived, obj);
- }
- Object** slot = entries->slot_at(new_start + survived);
- RecordSlot(slot, slot, obj);
- survived++;
- }
- }
- entries->set_number_of_entries(
- static_cast<DependentCode::DependencyGroup>(group), survived);
- return survived;
-}
-
-
-void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
- DisallowHeapAllocation no_allocation;
- DependentCode::GroupStartIndexes starts(entries);
- int number_of_entries = starts.number_of_entries();
- if (number_of_entries == 0) return;
- int new_number_of_entries = 0;
- // Go through all groups, remove dead codes and compact.
- for (int g = 0; g < DependentCode::kGroupCount; g++) {
- int survived = ClearNonLiveDependentCodeInGroup(
- entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
- new_number_of_entries += survived;
- }
- for (int i = new_number_of_entries; i < number_of_entries; i++) {
- entries->clear_at(i);
- }
-}
-
-
void MarkCompactCollector::ProcessWeakCollections() {
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
@@ -2684,10 +2574,31 @@ void MarkCompactCollector::ProcessAndClearWeakCells() {
// cannot be a Smi here.
HeapObject* value = HeapObject::cast(weak_cell->value());
if (!MarkCompactCollector::IsMarked(value)) {
- weak_cell->clear();
+ // Cells for new-space objects embedded in optimized code are wrapped in
+ // WeakCell and put into Heap::weak_object_to_code_table.
+ // Such cells do not have any strong references but we want to keep them
+ // alive as long as the cell value is alive.
+ // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
+ if (value->IsCell()) {
+ Object* cell_value = Cell::cast(value)->value();
+ if (cell_value->IsHeapObject() &&
+ MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
+ // Resurrect the cell.
+ MarkBit mark = Marking::MarkBitFrom(value);
+ SetMark(value, mark);
+ Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
+ RecordSlot(slot, slot, *slot);
+ slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+ RecordSlot(slot, slot, *slot);
+ } else {
+ weak_cell->clear();
+ }
+ } else {
+ weak_cell->clear();
+ }
} else {
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- heap()->mark_compact_collector()->RecordSlot(slot, slot, value);
+ RecordSlot(slot, slot, *slot);
}
weak_cell_obj = weak_cell->next();
weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
@@ -2881,6 +2792,14 @@ class PointersUpdatingVisitor : public ObjectVisitor {
HeapObject* heap_obj = HeapObject::cast(obj);
+// TODO(ishell): remove, once crbug/454297 is caught.
+#if V8_TARGET_ARCH_64_BIT
+ const uintptr_t kBoundary = V8_UINT64_C(1) << 48;
+ STATIC_ASSERT(kBoundary > 0);
+ if (reinterpret_cast<uintptr_t>(heap_obj->address()) >= kBoundary) {
+ CheckLayoutDescriptorAndDie(heap, slot);
+ }
+#endif
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
DCHECK(heap->InFromSpace(heap_obj) ||
@@ -2898,10 +2817,106 @@ class PointersUpdatingVisitor : public ObjectVisitor {
private:
inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
+ static void CheckLayoutDescriptorAndDie(Heap* heap, Object** slot);
+
Heap* heap_;
};
+#if V8_TARGET_ARCH_64_BIT
+// TODO(ishell): remove, once crbug/454297 is caught.
+void PointersUpdatingVisitor::CheckLayoutDescriptorAndDie(Heap* heap,
+ Object** slot) {
+ const int kDataBufferSize = 128;
+ uintptr_t data[kDataBufferSize] = {0};
+ int index = 0;
+ data[index++] = 0x10aaaaaaaaUL; // begin marker
+
+ data[index++] = reinterpret_cast<uintptr_t>(slot);
+ data[index++] = 0x15aaaaaaaaUL;
+
+ Address slot_address = reinterpret_cast<Address>(slot);
+
+ uintptr_t space_owner_id = 0xb001;
+ if (heap->new_space()->ToSpaceContains(slot_address)) {
+ space_owner_id = 1;
+ } else if (heap->new_space()->FromSpaceContains(slot_address)) {
+ space_owner_id = 2;
+ } else if (heap->old_pointer_space()->ContainsSafe(slot_address)) {
+ space_owner_id = 3;
+ } else if (heap->old_data_space()->ContainsSafe(slot_address)) {
+ space_owner_id = 4;
+ } else if (heap->code_space()->ContainsSafe(slot_address)) {
+ space_owner_id = 5;
+ } else if (heap->map_space()->ContainsSafe(slot_address)) {
+ space_owner_id = 6;
+ } else if (heap->cell_space()->ContainsSafe(slot_address)) {
+ space_owner_id = 7;
+ } else if (heap->property_cell_space()->ContainsSafe(slot_address)) {
+ space_owner_id = 8;
+ } else {
+ // Lo space or other.
+ space_owner_id = 9;
+ }
+ data[index++] = space_owner_id;
+ data[index++] = 0x20aaaaaaaaUL;
+
+ // Find map word lying near before the slot address (usually the map word is
+ // at -3 words from the slot but just in case we look up further.
+ Object** map_slot = slot;
+ bool found = false;
+ const int kMaxDistanceToMap = 64;
+ for (int i = 0; i < kMaxDistanceToMap; i++, map_slot--) {
+ Address map_address = reinterpret_cast<Address>(*map_slot);
+ if (heap->map_space()->ContainsSafe(map_address)) {
+ found = true;
+ break;
+ }
+ }
+ data[index++] = found;
+ data[index++] = 0x30aaaaaaaaUL;
+ data[index++] = reinterpret_cast<uintptr_t>(map_slot);
+ data[index++] = 0x35aaaaaaaaUL;
+
+ if (found) {
+ Address obj_address = reinterpret_cast<Address>(map_slot);
+ Address end_of_page =
+ reinterpret_cast<Address>(Page::FromAddress(obj_address)) +
+ Page::kPageSize;
+ Address end_address =
+ Min(obj_address + kPointerSize * kMaxDistanceToMap, end_of_page);
+ int size = static_cast<int>(end_address - obj_address);
+ data[index++] = size / kPointerSize;
+ data[index++] = 0x40aaaaaaaaUL;
+ memcpy(&data[index], reinterpret_cast<void*>(map_slot), size);
+ index += size / kPointerSize;
+ data[index++] = 0x50aaaaaaaaUL;
+
+ HeapObject* object = HeapObject::FromAddress(obj_address);
+ data[index++] = reinterpret_cast<uintptr_t>(object);
+ data[index++] = 0x60aaaaaaaaUL;
+
+ Map* map = object->map();
+ data[index++] = reinterpret_cast<uintptr_t>(map);
+ data[index++] = 0x70aaaaaaaaUL;
+
+ LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ data[index++] = reinterpret_cast<uintptr_t>(layout_descriptor);
+ data[index++] = 0x80aaaaaaaaUL;
+
+ memcpy(&data[index], reinterpret_cast<void*>(map->address()), Map::kSize);
+ index += Map::kSize / kPointerSize;
+ data[index++] = 0x90aaaaaaaaUL;
+ }
+
+ data[index++] = 0xeeeeeeeeeeUL;
+ DCHECK(index < kDataBufferSize);
+ base::OS::PrintError("Data: %p\n", static_cast<void*>(data));
+ base::OS::Abort();
+}
+#endif
+
+
static void UpdatePointer(HeapObject** address, HeapObject* object) {
Address new_addr = Memory::Address_at(object->address());
@@ -3538,20 +3553,18 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
heap_->string_table()->Iterate(&updating_visitor);
- updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
- if (heap_->weak_object_to_code_table()->IsHashTable()) {
- WeakHashTable* table =
- WeakHashTable::cast(heap_->weak_object_to_code_table());
- table->Iterate(&updating_visitor);
- table->Rehash(heap_->isolate()->factory()->undefined_value());
- }
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessWeakReferences(&evacuation_object_retainer);
+ heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+
+ // Collects callback info for handles that are pending (about to be
+ // collected) and either phantom or internal-fields. Releases the global
+ // handles. See also PostGarbageCollectionProcessing.
+ isolate()->global_handles()->CollectAllPhantomCallbackData();
// Visit invalidated code (we ignored all slots on it) and clear mark-bits
// under it.
@@ -3561,6 +3574,10 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
DCHECK(migration_slots_buffer_ == NULL);
+
+ // The hashing of weak_object_to_code_table is no longer valid.
+ heap()->weak_object_to_code_table()->Rehash(
+ heap()->isolate()->factory()->undefined_value());
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index e26e06c24f..589bebf63f 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -168,6 +168,8 @@ class MarkingDeque {
// heap.
INLINE(void PushBlack(HeapObject* object)) {
DCHECK(object->IsHeapObject());
+ // TODO(jochen): Remove again before we branch for 4.2.
+ CHECK(object->IsHeapObject() && object->map()->IsMap());
if (IsFull()) {
Marking::BlackToGrey(object);
MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
@@ -180,6 +182,8 @@ class MarkingDeque {
INLINE(void PushGrey(HeapObject* object)) {
DCHECK(object->IsHeapObject());
+ // TODO(jochen): Remove again before we branch for 4.2.
+ CHECK(object->IsHeapObject() && object->map()->IsMap());
if (IsFull()) {
SetOverflowed();
} else {
@@ -262,6 +266,11 @@ class SlotsBuffer {
void Add(ObjectSlot slot) {
DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
+#ifdef DEBUG
+ if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
+ DCHECK_NOT_NULL(*slot);
+ }
+#endif
slots_[idx_++] = slot;
}
@@ -647,10 +656,6 @@ class MarkCompactCollector {
bool evacuation() const { return evacuation_; }
- // Mark the global table which maps weak objects to dependent code without
- // marking its contents.
- void MarkWeakObjectToCodeTable();
-
// Special case for processing weak references in a full collection. We need
// to artificially keep AllocationSites alive for a time.
void MarkAllocationSite(AllocationSite* site);
@@ -663,6 +668,8 @@ class MarkCompactCollector {
void UncommitMarkingDeque();
+ void OverApproximateWeakClosure();
+
private:
class SweeperTask;
@@ -807,11 +814,6 @@ class MarkCompactCollector {
int number_of_own_descriptors);
void TrimEnumCache(Map* map, DescriptorArray* descriptors);
- void ClearDependentCode(DependentCode* dependent_code);
- void ClearNonLiveDependentCode(DependentCode* dependent_code);
- int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
- int start, int end, int new_start);
-
// Mark all values associated with reachable keys in weak collections
// encountered so far. This might push new object or even new weak maps onto
// the marking stack.
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index e6334f365b..58afeae016 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -262,10 +262,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
- !target->is_call_stub() &&
- ((heap->flush_monomorphic_ics() && !target->embeds_maps_weakly()) ||
- heap->isolate()->serializer_enabled() ||
- target->ic_age() != heap->global_ic_age())) {
+ !target->is_call_stub() && (heap->isolate()->serializer_enabled() ||
+ target->ic_age() != heap->global_ic_age())) {
ICUtility::Clear(heap->isolate(), rinfo->pc(),
rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -329,19 +327,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
- Object** slot =
- HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
- if (FLAG_collect_maps) {
- // Mark property cell dependent codes array but do not push it onto marking
- // stack, this will make references from it weak. We will clean dead
- // codes when we iterate over property cells in ClearNonLiveReferences.
- HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
- } else {
- StaticVisitor::VisitPointer(heap, slot);
- }
-
StaticVisitor::VisitPointers(
heap,
HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
@@ -370,20 +355,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
- Object** slot =
- HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
- if (FLAG_collect_maps) {
- // Mark allocation site dependent codes array but do not push it onto
- // marking stack, this will make references from it weak. We will clean
- // dead codes when we iterate over allocation sites in
- // ClearNonLiveReferences.
- HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
- } else {
- StaticVisitor::VisitPointer(heap, slot);
- }
-
StaticVisitor::VisitPointers(
heap,
HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
@@ -446,7 +417,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
shared->ResetForNewContext(heap->global_ic_age());
}
if (FLAG_cleanup_code_caches_at_gc) {
- shared->ClearTypeFeedbackInfo();
+ shared->ClearTypeFeedbackInfoAtGCTime();
}
if (FLAG_cache_optimized_code && FLAG_flush_optimized_code_cache &&
!shared->optimized_code_map()->IsSmi()) {
@@ -646,14 +617,6 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
descriptors->GetDescriptorEndSlot(end));
}
- // Mark prototype dependent codes array but do not push it onto marking
- // stack, this will make references from it weak. We will clean dead
- // codes when we iterate over maps in ClearNonLiveTransitions.
- Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
- HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
-
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 20d92de2f9..7b2e2d9a38 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -303,8 +303,13 @@ struct WeakListVisitor<Context> {
// Process the three weak lists linked off the context.
DoWeakList<JSFunction>(heap, context, retainer,
Context::OPTIMIZED_FUNCTIONS_LIST);
- DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
- DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+
+ // Code objects are always allocated in Code space, we do not have to visit
+ // them during scavenges.
+ if (heap->gc_state() == Heap::MARK_COMPACT) {
+ DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
+ DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+ }
}
template <class T>
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index d81d253e1b..cfa23255c4 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -91,6 +91,12 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
cur_addr_ += obj_size;
DCHECK(cur_addr_ <= cur_end_);
+ // TODO(hpayer): Remove the debugging code.
+ if (cur_addr_ > cur_end_) {
+ space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
+ obj_size);
+ }
+
if (!obj->IsFiller()) {
DCHECK_OBJECT_SIZE(obj_size);
return obj;
@@ -299,14 +305,6 @@ intptr_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}
-
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
- Map* map = object->map();
- Heap* heap = object->GetHeap();
- return map == heap->raw_unchecked_free_space_map() ||
- map == heap->raw_unchecked_one_pointer_filler_map() ||
- map == heap->raw_unchecked_two_pointer_filler_map();
-}
}
} // namespace v8::internal
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 060052e706..c2ce5fcfaf 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -10,6 +10,7 @@
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
+#include "src/snapshot.h"
namespace v8 {
namespace internal {
@@ -989,6 +990,16 @@ size_t PagedSpace::CommittedPhysicalMemory() {
}
+bool PagedSpace::ContainsSafe(Address addr) {
+ Page* p = Page::FromAddress(addr);
+ PageIterator iterator(this);
+ while (iterator.has_next()) {
+ if (iterator.next() == p) return true;
+ }
+ return false;
+}
+
+
Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on iterable spaces.
DCHECK(!heap()->mark_compact_collector()->in_use());
@@ -1028,13 +1039,16 @@ bool PagedSpace::Expand() {
intptr_t size = AreaSize();
if (anchor_.next_page() == &anchor_) {
- size = SizeOfFirstPage();
+ size = Snapshot::SizeOfFirstPage(identity());
}
Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
executable());
if (p == NULL) return false;
+ // Pages created during bootstrapping may contain immortal immovable objects.
+ if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
+
DCHECK(Capacity() <= max_capacity_);
p->InsertAfter(anchor_.prev_page());
@@ -1043,48 +1057,6 @@ bool PagedSpace::Expand() {
}
-intptr_t PagedSpace::SizeOfFirstPage() {
- // If using an ool constant pool then transfer the constant pool allowance
- // from the code space to the old pointer space.
- static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
- int size = 0;
- switch (identity()) {
- case OLD_POINTER_SPACE:
- size = (128 + constant_pool_delta) * kPointerSize * KB;
- break;
- case OLD_DATA_SPACE:
- size = 192 * KB;
- break;
- case MAP_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case CELL_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case PROPERTY_CELL_SPACE:
- size = 8 * kPointerSize * KB;
- break;
- case CODE_SPACE: {
- CodeRange* code_range = heap()->isolate()->code_range();
- if (code_range != NULL && code_range->valid()) {
- // When code range exists, code pages are allocated in a special way
- // (from the reserved code range). That part of the code is not yet
- // upgraded to handle small pages.
- size = AreaSize();
- } else {
- size = RoundUp((480 - constant_pool_delta) * KB *
- FullCodeGenerator::kBootCodeSizeMultiplier / 100,
- kPointerSize);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- return Min(size, AreaSize());
-}
-
-
int PagedSpace::CountTotalPages() {
PageIterator it(this);
int count = 0;
@@ -2084,79 +2056,6 @@ size_t NewSpace::CommittedPhysicalMemory() {
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
- DCHECK(size_in_bytes > 0);
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
-
- // We write a map and possibly size information to the block. If the block
- // is big enough to be a FreeSpace with at least one extra word (the next
- // pointer), we set its map to be the free space map and its size to an
- // appropriate array length for the desired size from HeapObject::Size().
- // If the block is too small (eg, one or two words), to hold both a size
- // field and a next pointer, we give it a filler map that gives it the
- // correct size.
- if (size_in_bytes > FreeSpace::kHeaderSize) {
- // Can't use FreeSpace::cast because it fails during deserialization.
- // We have to set the size first with a release store before we store
- // the map because a concurrent store buffer scan on scavenge must not
- // observe a map with an invalid size.
- FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
- this_as_free_space->nobarrier_set_size(size_in_bytes);
- synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
- } else if (size_in_bytes == kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
- } else if (size_in_bytes == 2 * kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
- } else {
- UNREACHABLE();
- }
- // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
- // deserialization because the free space map is not done yet.
-}
-
-
-FreeListNode* FreeListNode::next() {
- DCHECK(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kNextOffset));
- } else {
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kPointerSize));
- }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
- DCHECK(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
- } else {
- return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
- }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
- DCHECK(IsFreeListNode(this));
- // While we are booting the VM the free space map will actually be null. So
- // we have to make sure that we don't try to use it for anything at that
- // stage.
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
- base::NoBarrier_Store(
- reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
- reinterpret_cast<base::AtomicWord>(next));
- } else {
- base::NoBarrier_Store(
- reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
- reinterpret_cast<base::AtomicWord>(next));
- }
-}
-
-
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
if (category->top() != NULL) {
@@ -2190,11 +2089,11 @@ void FreeListCategory::Reset() {
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode* t = top();
- FreeListNode** n = &t;
+ FreeSpace* t = top();
+ FreeSpace** n = &t;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+ FreeSpace* free_space = *n;
sum += free_space->Size();
*n = (*n)->next();
} else {
@@ -2211,7 +2110,7 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeListNode* node = top();
+ FreeSpace* node = top();
while (node != NULL) {
if (Page::FromAddress(node->address()) == p) return true;
node = node->next();
@@ -2220,20 +2119,20 @@ bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
}
-FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
- FreeListNode* node = top();
+FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+ FreeSpace* node = top();
if (node == NULL) return NULL;
while (node != NULL &&
Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+ available_ -= node->Size();
node = node->next();
}
if (node != NULL) {
set_top(node->next());
- *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+ *node_size = node->Size();
available_ -= *node_size;
} else {
set_top(NULL);
@@ -2247,9 +2146,9 @@ FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
}
-FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
- int* node_size) {
- FreeListNode* node = PickNodeFromList(node_size);
+FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
+ int* node_size) {
+ FreeSpace* node = PickNodeFromList(node_size);
if (node != NULL && *node_size < size_in_bytes) {
Free(node, *node_size);
*node_size = 0;
@@ -2259,18 +2158,19 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
}
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top());
- set_top(node);
+void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
+ DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
+ free_space->set_next(top());
+ set_top(free_space);
if (end_ == NULL) {
- end_ = node;
+ end_ = free_space;
}
available_ += size_in_bytes;
}
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top();
+ FreeSpace* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
@@ -2309,8 +2209,8 @@ void FreeList::Reset() {
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, size_in_bytes);
+ heap_->CreateFillerObjectAt(start, size_in_bytes);
+
Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
@@ -2319,19 +2219,20 @@ int FreeList::Free(Address start, int size_in_bytes) {
return size_in_bytes;
}
+ FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
if (size_in_bytes <= kSmallListMax) {
- small_list_.Free(node, size_in_bytes);
+ small_list_.Free(free_space, size_in_bytes);
page->add_available_in_small_free_list(size_in_bytes);
} else if (size_in_bytes <= kMediumListMax) {
- medium_list_.Free(node, size_in_bytes);
+ medium_list_.Free(free_space, size_in_bytes);
page->add_available_in_medium_free_list(size_in_bytes);
} else if (size_in_bytes <= kLargeListMax) {
- large_list_.Free(node, size_in_bytes);
+ large_list_.Free(free_space, size_in_bytes);
page->add_available_in_large_free_list(size_in_bytes);
} else {
- huge_list_.Free(node, size_in_bytes);
+ huge_list_.Free(free_space, size_in_bytes);
page->add_available_in_huge_free_list(size_in_bytes);
}
@@ -2340,8 +2241,8 @@ int FreeList::Free(Address start, int size_in_bytes) {
}
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
- FreeListNode* node = NULL;
+FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+ FreeSpace* node = NULL;
Page* page = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
@@ -2378,13 +2279,13 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
int huge_list_available = huge_list_.available();
- FreeListNode* top_node = huge_list_.top();
- for (FreeListNode** cur = &top_node; *cur != NULL;
+ FreeSpace* top_node = huge_list_.top();
+ for (FreeSpace** cur = &top_node; *cur != NULL;
cur = (*cur)->next_address()) {
- FreeListNode* cur_node = *cur;
+ FreeSpace* cur_node = *cur;
while (cur_node != NULL &&
Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ int size = cur_node->Size();
huge_list_available -= size;
page = Page::FromAddress(cur_node->address());
page->add_available_in_huge_free_list(-size);
@@ -2397,9 +2298,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
break;
}
- DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
- int size = cur_as_free_space->Size();
+ int size = cur_node->Size();
if (size >= size_in_bytes) {
// Large enough node found. Unlink it from the list.
node = *cur;
@@ -2472,7 +2371,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
old_linear_size);
int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
owner_->SetTopAndLimit(NULL, NULL);
return NULL;
@@ -2566,11 +2465,10 @@ void FreeList::RepairLists(Heap* heap) {
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top();
+ FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
- sum += cur_as_free_space->nobarrier_size();
+ sum += cur->nobarrier_size();
cur = cur->next();
}
return sum;
@@ -2582,7 +2480,7 @@ static const int kVeryLongFreeList = 500;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top();
+ FreeSpace* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
@@ -2643,7 +2541,19 @@ intptr_t PagedSpace::SizeOfObjects() {
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
-void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
+void PagedSpace::RepairFreeListsAfterDeserialization() {
+ free_list_.RepairLists(heap());
+ // Each page may have a small free space that is not tracked by a free list.
+ // Update the maps for those free space objects.
+ PageIterator iterator(this);
+ while (iterator.has_next()) {
+ Page* page = iterator.next();
+ int size = static_cast<int>(page->non_available_small_blocks());
+ if (size == 0) continue;
+ Address address = page->OffsetToAddress(Page::kPageSize - size);
+ heap()->CreateFillerObjectAt(address, size);
+ }
+}
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index dcd336437e..2eae02953c 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -373,6 +373,7 @@ class MemoryChunk {
CONTAINS_ONLY_DATA,
EVACUATION_CANDIDATE,
RESCAN_ON_EVACUATION,
+ NEVER_EVACUATE, // May contain immortal immutables.
// WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
// otherwise marking bits are still intact.
@@ -604,7 +605,14 @@ class MemoryChunk {
static const int kFlagsOffset = kPointerSize;
- bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+ bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+
+ void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+
+ bool IsEvacuationCandidate() {
+ DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
+ return IsFlagSet(EVACUATION_CANDIDATE);
+ }
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
@@ -619,6 +627,7 @@ class MemoryChunk {
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
void MarkEvacuationCandidate() {
+ DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK(slots_buffer_ == NULL);
SetFlag(EVACUATION_CANDIDATE);
}
@@ -1276,13 +1285,13 @@ class AllocationInfo {
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
- (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
top_ = top;
}
INLINE(Address top()) const {
SLOW_DCHECK(top_ == NULL ||
- (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
return top_;
}
@@ -1290,13 +1299,13 @@ class AllocationInfo {
INLINE(void set_limit(Address limit)) {
SLOW_DCHECK(limit == NULL ||
- (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(limit) & kHeapObjectTagMask) == 0);
limit_ = limit;
}
INLINE(Address limit()) const {
SLOW_DCHECK(limit_ == NULL ||
- (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
+ (reinterpret_cast<intptr_t>(limit_) & kHeapObjectTagMask) ==
0);
return limit_;
}
@@ -1411,45 +1420,6 @@ class AllocationStats BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode : public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline FreeListNode* next();
- inline FreeListNode** next_address();
- inline void set_next(FreeListNode* next);
-
- inline void Zap();
-
- static inline FreeListNode* cast(Object* object) {
- return reinterpret_cast<FreeListNode*>(object);
- }
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
@@ -1461,27 +1431,26 @@ class FreeListCategory {
void Reset();
- void Free(FreeListNode* node, int size_in_bytes);
+ void Free(FreeSpace* node, int size_in_bytes);
- FreeListNode* PickNodeFromList(int* node_size);
- FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
+ FreeSpace* PickNodeFromList(int* node_size);
+ FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
intptr_t EvictFreeListItemsInList(Page* p);
bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
- FreeListNode* top() const {
- return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
+ FreeSpace* top() const {
+ return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
}
- void set_top(FreeListNode* top) {
+ void set_top(FreeSpace* top) {
base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
}
- FreeListNode** GetEndAddress() { return &end_; }
- FreeListNode* end() const { return end_; }
- void set_end(FreeListNode* end) { end_ = end; }
+ FreeSpace* end() const { return end_; }
+ void set_end(FreeSpace* end) { end_ = end; }
int* GetAvailableAddress() { return &available_; }
int available() const { return available_; }
@@ -1497,9 +1466,9 @@ class FreeListCategory {
#endif
private:
- // top_ points to the top FreeListNode* in the free list category.
+ // top_ points to the top FreeSpace* in the free list category.
base::AtomicWord top_;
- FreeListNode* end_;
+ FreeSpace* end_;
base::Mutex mutex_;
// Total available bytes in all blocks of this free list category.
@@ -1596,17 +1565,18 @@ class FreeList {
FreeListCategory* large_list() { return &large_list_; }
FreeListCategory* huge_list() { return &huge_list_; }
+ static const int kSmallListMin = 0x20 * kPointerSize;
+
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
- FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
PagedSpace* owner_;
Heap* heap_;
- static const int kSmallListMin = 0x20 * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
static const int kLargeListMax = 0x3fff * kPointerSize;
@@ -1693,6 +1663,9 @@ class PagedSpace : public Space {
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
bool Contains(HeapObject* o) { return Contains(o->address()); }
+ // Unlike Contains() methods it is safe to call this one even for addresses
+ // of unmapped memory.
+ bool ContainsSafe(Address addr);
// Given an address occupied by a live object, return that object if it is
// in this space, or a Smi if it is not. The implementation iterates over
@@ -1702,7 +1675,7 @@ class PagedSpace : public Space {
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
- void RepairFreeListsAfterBoot();
+ void RepairFreeListsAfterDeserialization();
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
@@ -1909,8 +1882,6 @@ class PagedSpace : public Space {
// Maximum capacity of this space.
intptr_t max_capacity_;
- intptr_t SizeOfFirstPage();
-
// Accounting information for this space.
AllocationStats accounting_stats_;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index aac68116ce..591d28fe9f 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -55,9 +55,11 @@ void StoreBuffer::SetUp() {
old_limit_ = old_start_ + initial_length;
old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
- CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
- (old_limit_ - old_start_) * kPointerSize,
- false));
+ if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
+ (old_limit_ - old_start_) * kPointerSize,
+ false)) {
+ V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+ }
DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@@ -71,9 +73,11 @@ void StoreBuffer::SetUp() {
DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
0);
- CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
- false)); // Not executable.
+ if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+ kStoreBufferSize,
+ false)) { // Not executable.
+ V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+ }
heap_->public_set_store_buffer_top(start_);
hash_set_1_ = new uintptr_t[kHashSetLength];
@@ -81,6 +85,9 @@ void StoreBuffer::SetUp() {
hash_sets_are_empty_ = false;
ClearFilteringHashSets();
+
+ heap_->isolate()->set_store_buffer_hash_set_1_address(hash_set_1_);
+ heap_->isolate()->set_store_buffer_hash_set_2_address(hash_set_2_);
}
@@ -130,8 +137,10 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
while (old_limit_ - old_top_ < space_needed &&
old_limit_ < old_reserved_limit_) {
size_t grow = old_limit_ - old_start_; // Double size.
- CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
- grow * kPointerSize, false));
+ if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+ grow * kPointerSize, false)) {
+ V8::FatalProcessOutOfMemory("StoreBuffer::EnsureSpace");
+ }
old_limit_ += grow;
}
@@ -554,6 +563,9 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
void StoreBuffer::Compact() {
+ CHECK(hash_set_1_ == heap_->isolate()->store_buffer_hash_set_1_address());
+ CHECK(hash_set_2_ == heap_->isolate()->store_buffer_hash_set_2_address());
+
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
if (top == start_) return;
diff --git a/deps/v8/src/hydrogen-bch.cc b/deps/v8/src/hydrogen-bch.cc
index 2feb1587f8..875c18c3f9 100644
--- a/deps/v8/src/hydrogen-bch.cc
+++ b/deps/v8/src/hydrogen-bch.cc
@@ -253,7 +253,7 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
HValue* context = graph()->GetInvalidContext();
HValue* limit = data->limit();
if (has_upper_constant_limit) {
- HConstant* new_limit = HConstant::New(zone, context,
+ HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
upper_constant_limit);
new_limit->InsertBefore(pre_header->end());
limit = new_limit;
@@ -263,7 +263,7 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
if (limit->IsInteger32Constant() &&
limit->block() != pre_header &&
!limit->block()->Dominates(pre_header)) {
- HConstant* new_limit = HConstant::New(zone, context,
+ HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
limit->GetInteger32Constant());
new_limit->InsertBefore(pre_header->end());
limit = new_limit;
@@ -271,7 +271,7 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
// Do the hoisting.
HBoundsCheck* hoisted_check = HBoundsCheck::New(
- zone, context, limit, check->check()->length());
+ graph()->isolate(), zone, context, limit, check->check()->length());
hoisted_check->InsertBefore(pre_header->end());
hoisted_check->set_allow_equality(true);
counters()->bounds_checks_hoisted()->Increment();
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
index 4bdad06503..cdfedb4e51 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -373,7 +373,7 @@ class HCheckTable : public ZoneObject {
instr->DeleteAndReplaceWith(entry->check_);
INC_STAT(redundant_);
} else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
- DCHECK_EQ(NULL, entry->check_);
+ DCHECK_NULL(entry->check_);
TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n",
instr->id(), instr->block()->block_id()));
instr->set_maps(entry->maps_->Copy(graph->zone()));
@@ -693,14 +693,14 @@ class HCheckTable : public ZoneObject {
bool compact = false;
for (int i = 0; i < size_; i++) {
HCheckTableEntry* entry = &entries_[i];
- DCHECK(entry->object_ != NULL);
+ DCHECK_NOT_NULL(entry->object_);
if (phase_->aliasing_->MayAlias(entry->object_, object)) {
entry->object_ = NULL;
compact = true;
}
}
if (compact) Compact();
- DCHECK(Find(object) == NULL);
+ DCHECK_NULL(Find(object));
}
void Compact() {
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc
index 360b6945f3..c653fc1b60 100644
--- a/deps/v8/src/hydrogen-dce.cc
+++ b/deps/v8/src/hydrogen-dce.cc
@@ -32,12 +32,13 @@ void HDeadCodeEliminationPhase::MarkLive(
void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
+ AllowHandleDereference allow_deref;
OFStream os(stdout);
os << "[MarkLive ";
if (ref != NULL) {
os << *ref;
} else {
- os << "root ";
+ os << "root";
}
os << " -> " << *instr << "]" << std::endl;
}
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc
index 3b0f15870f..e22dd88831 100644
--- a/deps/v8/src/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/hydrogen-escape-analysis.cc
@@ -131,8 +131,8 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
HValue* value = state->map_value();
// TODO(mstarzinger): This will narrow a map check against a set of maps
// down to the first element in the set. Revisit and fix this.
- HCheckValue* check = HCheckValue::New(
- zone, NULL, value, mapcheck->maps()->at(0), false);
+ HCheckValue* check = HCheckValue::New(graph()->isolate(), zone, NULL, value,
+ mapcheck->maps()->at(0), false);
check->InsertBefore(mapcheck);
return check;
}
@@ -146,8 +146,8 @@ HValue* HEscapeAnalysisPhase::NewLoadReplacement(
Representation representation = load->representation();
if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
Zone* zone = graph()->zone();
- HInstruction* new_instr =
- HForceRepresentation::New(zone, NULL, load_value, representation);
+ HInstruction* new_instr = HForceRepresentation::New(
+ graph()->isolate(), zone, NULL, load_value, representation);
new_instr->InsertAfter(load);
replacement = new_instr;
}
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index 421c6cc103..d04a6eb3aa 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -7,7 +7,6 @@
#include <iosfwd>
-#include "src/compiler.h"
#include "src/hydrogen.h"
#include "src/hydrogen-instructions.h"
#include "src/zone.h"
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 0e6a03d716..b89bcc4d99 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -18,6 +18,8 @@
#include "src/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@@ -620,17 +622,6 @@ void HValue::ComputeInitialRange(Zone* zone) {
}
-std::ostream& operator<<(std::ostream& os, const HSourcePosition& p) {
- if (p.IsUnknown()) {
- return os << "<?>";
- } else if (FLAG_hydrogen_track_positions) {
- return os << "<" << p.inlining_id() << ":" << p.position() << ">";
- } else {
- return os << "<0:" << p.raw() << ">";
- }
-}
-
-
std::ostream& HInstruction::PrintTo(std::ostream& os) const { // NOLINT
os << Mnemonic() << " ";
PrintDataTo(os) << ChangesOf(this) << TypeOf(this);
@@ -940,17 +931,15 @@ std::ostream& HCallJSFunction::PrintDataTo(std::ostream& os) const { // NOLINT
}
-HCallJSFunction* HCallJSFunction::New(
- Zone* zone,
- HValue* context,
- HValue* function,
- int argument_count,
- bool pass_argument_count) {
+HCallJSFunction* HCallJSFunction::New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* function,
+ int argument_count,
+ bool pass_argument_count) {
bool has_stack_check = false;
if (function->IsConstant()) {
HConstant* fun_const = HConstant::cast(function);
Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(zone->isolate()));
+ Handle<JSFunction>::cast(fun_const->handle(isolate));
has_stack_check = !jsfun.is_null() &&
(jsfun->code()->kind() == Code::FUNCTION ||
jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
@@ -968,6 +957,15 @@ std::ostream& HBinaryCall::PrintDataTo(std::ostream& os) const { // NOLINT
}
+std::ostream& HCallFunction::PrintDataTo(std::ostream& os) const { // NOLINT
+ os << NameOf(context()) << " " << NameOf(function());
+ if (HasVectorAndSlot()) {
+ os << " (type-feedback-vector icslot " << slot().ToInt() << ")";
+ }
+ return os;
+}
+
+
void HBoundsCheck::ApplyIndexChange() {
if (skip_check()) return;
@@ -987,13 +985,16 @@ void HBoundsCheck::ApplyIndexChange() {
int actual_offset = decomposition.offset() + offset();
int actual_scale = decomposition.scale() + scale();
- Zone* zone = block()->graph()->zone();
- HValue* context = block()->graph()->GetInvalidContext();
+ HGraph* graph = block()->graph();
+ Isolate* isolate = graph->isolate();
+ Zone* zone = graph->zone();
+ HValue* context = graph->GetInvalidContext();
if (actual_offset != 0) {
- HConstant* add_offset = HConstant::New(zone, context, actual_offset);
+ HConstant* add_offset =
+ HConstant::New(isolate, zone, context, actual_offset);
add_offset->InsertBefore(this);
- HInstruction* add = HAdd::New(zone, context,
- current_index, add_offset);
+ HInstruction* add =
+ HAdd::New(isolate, zone, context, current_index, add_offset);
add->InsertBefore(this);
add->AssumeRepresentation(index()->representation());
add->ClearFlag(kCanOverflow);
@@ -1001,10 +1002,10 @@ void HBoundsCheck::ApplyIndexChange() {
}
if (actual_scale != 0) {
- HConstant* sar_scale = HConstant::New(zone, context, actual_scale);
+ HConstant* sar_scale = HConstant::New(isolate, zone, context, actual_scale);
sar_scale->InsertBefore(this);
- HInstruction* sar = HSar::New(zone, context,
- current_index, sar_scale);
+ HInstruction* sar =
+ HSar::New(isolate, zone, context, current_index, sar_scale);
sar->InsertBefore(this);
sar->AssumeRepresentation(index()->representation());
current_index = sar;
@@ -1520,8 +1521,9 @@ std::ostream& HTypeof::PrintDataTo(std::ostream& os) const { // NOLINT
}
-HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
- HValue* value, Representation representation) {
+HInstruction* HForceRepresentation::New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* value,
+ Representation representation) {
if (FLAG_fold_constants && value->IsConstant()) {
HConstant* c = HConstant::cast(value);
c = c->CopyToRepresentation(representation, zone);
@@ -1593,7 +1595,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
}
return Prepend(HMathFloorOfDiv::New(
- block()->zone(), context(), left, right));
+ block()->graph()->isolate(), block()->zone(), context(), left, right));
}
return this;
}
@@ -1803,9 +1805,7 @@ Range* HConstant::InferRange(Zone* zone) {
}
-HSourcePosition HPhi::position() const {
- return block()->first()->position();
-}
+SourcePosition HPhi::position() const { return block()->first()->position(); }
Range* HPhi::InferRange(Zone* zone) {
@@ -2120,7 +2120,8 @@ void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
DCHECK(context != NULL);
Zone* zone = index_base->block()->graph()->zone();
- set_added_constant(HConstant::New(zone, context, mask));
+ Isolate* isolate = index_base->block()->graph()->isolate();
+ set_added_constant(HConstant::New(isolate, zone, context, mask));
if (added_index() != NULL) {
added_constant()->InsertBefore(added_index());
} else {
@@ -2129,8 +2130,8 @@ void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
if (added_index() == NULL) {
first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
- HInstruction* new_index = HBitwise::New(zone, context, token, index_base,
- added_constant());
+ HInstruction* new_index = HBitwise::New(isolate, zone, context, token,
+ index_base, added_constant());
DCHECK(new_index->IsBitwise());
new_index->ClearAllSideEffects();
new_index->AssumeRepresentation(Representation::Integer32());
@@ -2681,6 +2682,19 @@ static bool IsInteger32(double value) {
}
+HConstant::HConstant(Special special)
+ : HTemplateInstruction<0>(HType::TaggedNumber()),
+ object_(Handle<Object>::null()),
+ object_map_(Handle<Map>::null()),
+ bit_field_(HasDoubleValueField::encode(true) |
+ InstanceTypeField::encode(kUnknownInstanceType)),
+ int32_value_(0) {
+ DCHECK_EQ(kHoleNaN, special);
+ std::memcpy(&double_value_, &kHoleNanInt64, sizeof(double_value_));
+ Initialize(Representation::Double());
+}
+
+
HConstant::HConstant(Handle<Object> object, Representation r)
: HTemplateInstruction<0>(HType::FromValue(object)),
object_(Unique<Object>::CreateUninitialized(object)),
@@ -2783,7 +2797,7 @@ HConstant::HConstant(double double_value, Representation r,
!std::isnan(double_value)) |
IsUndetectableField::encode(false) |
InstanceTypeField::encode(kUnknownInstanceType)),
- int32_value_(HasInteger32Value() ? DoubleToInt32(double_value) : 0),
+ int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
bit_field_ = HasSmiValueField::update(
bit_field_, HasInteger32Value() && Smi::IsValid(int32_value_));
@@ -2842,7 +2856,7 @@ void HConstant::Initialize(Representation r) {
// could cause heap object checks not to get emitted.
object_ = Unique<Object>(Handle<Object>::null());
}
- if (r.IsSmiOrInteger32()) {
+ if (r.IsSmiOrInteger32() && object_.handle().is_null()) {
// If it's not a heap object, it can't be in new space.
bit_field_ = IsNotInNewSpaceField::update(bit_field_, true);
}
@@ -2936,14 +2950,15 @@ Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
}
-Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Zone* zone) {
+Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Isolate* isolate,
+ Zone* zone) {
HConstant* res = NULL;
- Handle<Object> handle = this->handle(zone->isolate());
+ Handle<Object> handle = this->handle(isolate);
if (handle->IsBoolean()) {
res = handle->BooleanValue() ?
new(zone) HConstant(1) : new(zone) HConstant(0);
} else if (handle->IsUndefined()) {
- res = new(zone) HConstant(base::OS::nan_value());
+ res = new (zone) HConstant(std::numeric_limits<double>::quiet_NaN());
} else if (handle->IsNull()) {
res = new(zone) HConstant(0);
}
@@ -3537,18 +3552,14 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
if (names_cache->enumerable() == object()) {
HForInCacheArray* index_cache =
names_cache->index_cache();
- HCheckMapValue* map_check =
- HCheckMapValue::New(block()->graph()->zone(),
- block()->graph()->GetInvalidContext(),
- object(),
- names_cache->map());
+ HCheckMapValue* map_check = HCheckMapValue::New(
+ block()->graph()->isolate(), block()->graph()->zone(),
+ block()->graph()->GetInvalidContext(), object(),
+ names_cache->map());
HInstruction* index = HLoadKeyed::New(
- block()->graph()->zone(),
- block()->graph()->GetInvalidContext(),
- index_cache,
- key_load->key(),
- key_load->key(),
- key_load->elements_kind());
+ block()->graph()->isolate(), block()->graph()->zone(),
+ block()->graph()->GetInvalidContext(), index_cache, key_load->key(),
+ key_load->key(), key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
return Prepend(new(block()->zone()) HLoadFieldByIndex(
@@ -3711,6 +3722,7 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
DCHECK(side_effect == kNewSpacePromotion);
Zone* zone = block()->zone();
+ Isolate* isolate = block()->isolate();
if (!FLAG_use_allocation_folding) return false;
// Try to fold allocations together with their dominating allocations.
@@ -3812,25 +3824,19 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HInstruction* new_dominator_size_value;
if (current_size->IsInteger32Constant()) {
- new_dominator_size_value =
- HConstant::CreateAndInsertBefore(zone,
- context(),
- new_dominator_size,
- Representation::None(),
- dominator_allocate);
+ new_dominator_size_value = HConstant::CreateAndInsertBefore(
+ isolate, zone, context(), new_dominator_size, Representation::None(),
+ dominator_allocate);
} else {
- HValue* new_dominator_size_constant =
- HConstant::CreateAndInsertBefore(zone,
- context(),
- dominator_size_constant,
- Representation::Integer32(),
- dominator_allocate);
+ HValue* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
+ isolate, zone, context(), dominator_size_constant,
+ Representation::Integer32(), dominator_allocate);
// Add old and new size together and insert.
current_size->ChangeRepresentation(Representation::Integer32());
- new_dominator_size_value = HAdd::New(zone, context(),
- new_dominator_size_constant, current_size);
+ new_dominator_size_value = HAdd::New(
+ isolate, zone, context(), new_dominator_size_constant, current_size);
new_dominator_size_value->ClearFlag(HValue::kCanOverflow);
new_dominator_size_value->ChangeRepresentation(Representation::Integer32());
@@ -3862,18 +3868,11 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
// After that replace the dominated allocate instruction.
HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
- zone,
- context(),
- dominator_size_constant,
- Representation::None(),
+ isolate, zone, context(), dominator_size_constant, Representation::None(),
this);
- HInstruction* dominated_allocate_instr =
- HInnerAllocatedObject::New(zone,
- context(),
- dominator_allocate,
- inner_offset,
- type());
+ HInstruction* dominated_allocate_instr = HInnerAllocatedObject::New(
+ isolate, zone, context(), dominator_allocate, inner_offset, type());
dominated_allocate_instr->InsertBefore(this);
DeleteAndReplaceWith(dominated_allocate_instr);
if (FLAG_trace_allocation_folding) {
@@ -3956,43 +3955,43 @@ void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
// would otherwise automatically choose int32, but the actual store
// requires a Smi-tagged value.
HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
- zone,
- context(),
+ block()->isolate(), zone, context(),
filler_free_space_size_->value()->GetInteger32Constant() +
free_space_size,
- Representation::Smi(),
- filler_free_space_size_);
+ Representation::Smi(), filler_free_space_size_);
filler_free_space_size_->UpdateValue(new_free_space_size);
}
void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
DCHECK(filler_free_space_size_ == NULL);
+ Isolate* isolate = block()->isolate();
Zone* zone = block()->zone();
HInstruction* free_space_instr =
- HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
- dominating_allocate_->size(), type());
+ HInnerAllocatedObject::New(isolate, zone, context(), dominating_allocate_,
+ dominating_allocate_->size(), type());
free_space_instr->InsertBefore(this);
HConstant* filler_map = HConstant::CreateAndInsertAfter(
- zone, Unique<Map>::CreateImmovable(
- isolate()->factory()->free_space_map()), true, free_space_instr);
- HInstruction* store_map = HStoreNamedField::New(zone, context(),
- free_space_instr, HObjectAccess::ForMap(), filler_map);
+ zone, Unique<Map>::CreateImmovable(isolate->factory()->free_space_map()),
+ true, free_space_instr);
+ HInstruction* store_map =
+ HStoreNamedField::New(isolate, zone, context(), free_space_instr,
+ HObjectAccess::ForMap(), filler_map);
store_map->SetFlag(HValue::kHasNoObservableSideEffects);
store_map->InsertAfter(filler_map);
// We must explicitly force Smi representation here because on x64 we
// would otherwise automatically choose int32, but the actual store
// requires a Smi-tagged value.
- HConstant* filler_size = HConstant::CreateAndInsertAfter(
- zone, context(), free_space_size, Representation::Smi(), store_map);
+ HConstant* filler_size =
+ HConstant::CreateAndInsertAfter(isolate, zone, context(), free_space_size,
+ Representation::Smi(), store_map);
// Must force Smi representation for x64 (see comment above).
- HObjectAccess access =
- HObjectAccess::ForMapAndOffset(isolate()->factory()->free_space_map(),
- FreeSpace::kSizeOffset,
- Representation::Smi());
- HStoreNamedField* store_size = HStoreNamedField::New(zone, context(),
- free_space_instr, access, filler_size);
+ HObjectAccess access = HObjectAccess::ForMapAndOffset(
+ isolate->factory()->free_space_map(), FreeSpace::kSizeOffset,
+ Representation::Smi());
+ HStoreNamedField* store_size = HStoreNamedField::New(
+ isolate, zone, context(), free_space_instr, access, filler_size);
store_size->SetFlag(HValue::kHasNoObservableSideEffects);
store_size->InsertAfter(filler_size);
filler_free_space_size_ = store_size;
@@ -4005,8 +4004,8 @@ void HAllocate::ClearNextMapWord(int offset) {
HObjectAccess access =
HObjectAccess::ForObservableJSObjectOffset(offset);
HStoreNamedField* clear_next_map =
- HStoreNamedField::New(zone, context(), this, access,
- block()->graph()->GetConstant0());
+ HStoreNamedField::New(block()->isolate(), zone, context(), this, access,
+ block()->graph()->GetConstant0());
clear_next_map->ClearAllSideEffects();
clear_next_map->InsertAfter(this);
}
@@ -4038,55 +4037,48 @@ bool HStoreKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) {
bool HStoreKeyed::NeedsCanonicalization() {
- // If value is an integer or smi or comes from the result of a keyed load or
- // constant then it is either be a non-hole value or in the case of a constant
- // the hole is only being stored explicitly: no need for canonicalization.
- //
- // The exception to that is keyed loads from external float or double arrays:
- // these can load arbitrary representation of NaN.
-
- if (value()->IsConstant()) {
- return false;
- }
-
- if (value()->IsLoadKeyed()) {
- return IsExternalFloatOrDoubleElementsKind(
- HLoadKeyed::cast(value())->elements_kind());
- }
-
- if (value()->IsChange()) {
- if (HChange::cast(value())->from().IsSmiOrInteger32()) {
- return false;
+ switch (value()->opcode()) {
+ case kLoadKeyed: {
+ ElementsKind load_kind = HLoadKeyed::cast(value())->elements_kind();
+ return IsExternalFloatOrDoubleElementsKind(load_kind) ||
+ IsFixedFloatElementsKind(load_kind);
}
- if (HChange::cast(value())->value()->type().IsSmi()) {
- return false;
+ case kChange: {
+ Representation from = HChange::cast(value())->from();
+ return from.IsTagged() || from.IsHeapObject();
}
+ case kLoadNamedField:
+ case kPhi: {
+ // Better safe than sorry...
+ return true;
+ }
+ default:
+ return false;
}
- return true;
}
-#define H_CONSTANT_INT(val) \
-HConstant::New(zone, context, static_cast<int32_t>(val))
-#define H_CONSTANT_DOUBLE(val) \
-HConstant::New(zone, context, static_cast<double>(val))
+#define H_CONSTANT_INT(val) \
+ HConstant::New(isolate, zone, context, static_cast<int32_t>(val))
+#define H_CONSTANT_DOUBLE(val) \
+ HConstant::New(isolate, zone, context, static_cast<double>(val))
-#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
-HInstruction* HInstr::New( \
- Zone* zone, HValue* context, HValue* left, HValue* right) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
- if (IsInt32Double(double_res)) { \
- return H_CONSTANT_INT(double_res); \
- } \
- return H_CONSTANT_DOUBLE(double_res); \
- } \
- } \
- return new(zone) HInstr(context, left, right); \
-}
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
+ HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
+ HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
+ if (IsInt32Double(double_res)) { \
+ return H_CONSTANT_INT(double_res); \
+ } \
+ return H_CONSTANT_DOUBLE(double_res); \
+ } \
+ } \
+ return new (zone) HInstr(context, left, right); \
+ }
DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
@@ -4096,10 +4088,8 @@ DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
-HInstruction* HStringAdd::New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right,
+HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right,
PretenureFlag pretenure_flag,
StringAddFlags flags,
Handle<AllocationSite> allocation_site) {
@@ -4111,9 +4101,9 @@ HInstruction* HStringAdd::New(Zone* zone,
Handle<String> right_string = c_right->StringValue();
// Prevent possible exception by invalid string length.
if (left_string->length() + right_string->length() < String::kMaxLength) {
- MaybeHandle<String> concat = zone->isolate()->factory()->NewConsString(
+ MaybeHandle<String> concat = isolate->factory()->NewConsString(
c_left->StringValue(), c_right->StringValue());
- return HConstant::New(zone, context, concat.ToHandleChecked());
+ return HConstant::New(isolate, zone, context, concat.ToHandleChecked());
}
}
}
@@ -4140,26 +4130,28 @@ std::ostream& HStringAdd::PrintDataTo(std::ostream& os) const { // NOLINT
}
-HInstruction* HStringCharFromCode::New(
- Zone* zone, HValue* context, HValue* char_code) {
+HInstruction* HStringCharFromCode::New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* char_code) {
if (FLAG_fold_constants && char_code->IsConstant()) {
HConstant* c_code = HConstant::cast(char_code);
- Isolate* isolate = zone->isolate();
if (c_code->HasNumberValue()) {
if (std::isfinite(c_code->DoubleValue())) {
uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
- return HConstant::New(zone, context,
+ return HConstant::New(
+ isolate, zone, context,
isolate->factory()->LookupSingleCharacterStringFromCode(code));
}
- return HConstant::New(zone, context, isolate->factory()->empty_string());
+ return HConstant::New(isolate, zone, context,
+ isolate->factory()->empty_string());
}
}
return new(zone) HStringCharFromCode(context, char_code);
}
-HInstruction* HUnaryMathOperation::New(
- Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op) {
+HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* value,
+ BuiltinFunctionId op) {
do {
if (!FLAG_fold_constants) break;
if (!value->IsConstant()) break;
@@ -4167,7 +4159,7 @@ HInstruction* HUnaryMathOperation::New(
if (!constant->HasNumberValue()) break;
double d = constant->DoubleValue();
if (std::isnan(d)) { // NaN poisons everything.
- return H_CONSTANT_DOUBLE(base::OS::nan_value());
+ return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
}
if (std::isinf(d)) { // +Infinity and -Infinity.
switch (op) {
@@ -4175,7 +4167,8 @@ HInstruction* HUnaryMathOperation::New(
return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
case kMathLog:
case kMathSqrt:
- return H_CONSTANT_DOUBLE((d > 0.0) ? d : base::OS::nan_value());
+ return H_CONSTANT_DOUBLE(
+ (d > 0.0) ? d : std::numeric_limits<double>::quiet_NaN());
case kMathPowHalf:
case kMathAbs:
return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
@@ -4262,26 +4255,25 @@ Representation HUnaryMathOperation::RepresentationFromUses() {
}
-HInstruction* HPower::New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
+HInstruction* HPower::New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
double result = power_helper(c_left->DoubleValue(),
c_right->DoubleValue());
- return H_CONSTANT_DOUBLE(std::isnan(result) ? base::OS::nan_value()
- : result);
+ return H_CONSTANT_DOUBLE(std::isnan(result)
+ ? std::numeric_limits<double>::quiet_NaN()
+ : result);
}
}
return new(zone) HPower(left, right);
}
-HInstruction* HMathMinMax::New(
- Zone* zone, HValue* context, HValue* left, HValue* right, Operation op) {
+HInstruction* HMathMinMax::New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right, Operation op) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4306,17 +4298,15 @@ HInstruction* HMathMinMax::New(
}
}
// All comparisons failed, must be NaN.
- return H_CONSTANT_DOUBLE(base::OS::nan_value());
+ return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
}
}
return new(zone) HMathMinMax(context, left, right, op);
}
-HInstruction* HMod::New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
+HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4339,8 +4329,8 @@ HInstruction* HMod::New(Zone* zone,
}
-HInstruction* HDiv::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
+HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right) {
// If left and right are constant values, try to return a constant value.
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
@@ -4363,8 +4353,8 @@ HInstruction* HDiv::New(
}
-HInstruction* HBitwise::New(
- Zone* zone, HValue* context, Token::Value op, HValue* left, HValue* right) {
+HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
+ Token::Value op, HValue* left, HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4393,18 +4383,18 @@ HInstruction* HBitwise::New(
}
-#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
-HInstruction* HInstr::New( \
- Zone* zone, HValue* context, HValue* left, HValue* right) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- return H_CONSTANT_INT(result); \
- } \
- } \
- return new(zone) HInstr(context, left, right); \
-}
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
+ HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
+ HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ return H_CONSTANT_INT(result); \
+ } \
+ } \
+ return new (zone) HInstr(context, left, right); \
+ }
DEFINE_NEW_H_BITWISE_INSTR(HSar,
@@ -4415,8 +4405,8 @@ c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
#undef DEFINE_NEW_H_BITWISE_INSTR
-HInstruction* HShr::New(
- Zone* zone, HValue* context, HValue* left, HValue* right) {
+HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4433,11 +4423,9 @@ HInstruction* HShr::New(
}
-HInstruction* HSeqStringGetChar::New(Zone* zone,
- HValue* context,
- String::Encoding encoding,
- HValue* string,
- HValue* index) {
+HInstruction* HSeqStringGetChar::New(Isolate* isolate, Zone* zone,
+ HValue* context, String::Encoding encoding,
+ HValue* string, HValue* index) {
if (FLAG_fold_constants && string->IsConstant() && index->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
HConstant* c_index = HConstant::cast(index);
@@ -4476,9 +4464,9 @@ void HPhi::SimplifyConstantInputs() {
if (operand->HasInteger32Value()) {
continue;
} else if (operand->HasDoubleValue()) {
- HConstant* integer_input =
- HConstant::New(graph->zone(), graph->GetInvalidContext(),
- DoubleToInt32(operand->DoubleValue()));
+ HConstant* integer_input = HConstant::New(
+ graph->isolate(), graph->zone(), graph->GetInvalidContext(),
+ DoubleToInt32(operand->DoubleValue()));
integer_input->InsertAfter(operand);
SetOperandAt(i, integer_input);
} else if (operand->HasBooleanValue()) {
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index c863612cdd..8fd2aed0af 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -5,6 +5,7 @@
#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
#define V8_HYDROGEN_INSTRUCTIONS_H_
+#include <cstring>
#include <iosfwd>
#include "src/v8.h"
@@ -411,62 +412,6 @@ class DecompositionResult FINAL BASE_EMBEDDED {
typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
-// This class encapsulates encoding and decoding of sources positions from
-// which hydrogen values originated.
-// When FLAG_track_hydrogen_positions is set this object encodes the
-// identifier of the inlining and absolute offset from the start of the
-// inlined function.
-// When the flag is not set we simply track absolute offset from the
-// script start.
-class HSourcePosition {
- public:
- HSourcePosition(const HSourcePosition& other) : value_(other.value_) { }
-
- static HSourcePosition Unknown() {
- return HSourcePosition(RelocInfo::kNoPosition);
- }
-
- bool IsUnknown() const { return value_ == RelocInfo::kNoPosition; }
-
- int position() const { return PositionField::decode(value_); }
- void set_position(int position) {
- if (FLAG_hydrogen_track_positions) {
- value_ = static_cast<int>(PositionField::update(value_, position));
- } else {
- value_ = position;
- }
- }
-
- int inlining_id() const { return InliningIdField::decode(value_); }
- void set_inlining_id(int inlining_id) {
- if (FLAG_hydrogen_track_positions) {
- value_ = static_cast<int>(InliningIdField::update(value_, inlining_id));
- }
- }
-
- int raw() const { return value_; }
-
- private:
- typedef BitField<int, 0, 9> InliningIdField;
-
- // Offset from the start of the inlined function.
- typedef BitField<int, 9, 23> PositionField;
-
- explicit HSourcePosition(int value) : value_(value) { }
-
- friend class HPositionInfo;
- friend class LCodeGenBase;
-
- // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
- // and PositionField.
- // Otherwise contains absolute offset from the script start.
- int value_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const HSourcePosition& p);
-
-
class HValue : public ZoneObject {
public:
static const int kNoNumber = -1;
@@ -562,10 +507,8 @@ class HValue : public ZoneObject {
flags_(0) {}
virtual ~HValue() {}
- virtual HSourcePosition position() const {
- return HSourcePosition::Unknown();
- }
- virtual HSourcePosition operand_position(int index) const {
+ virtual SourcePosition position() const { return SourcePosition::Unknown(); }
+ virtual SourcePosition operand_position(int index) const {
return position();
}
@@ -951,98 +894,76 @@ std::ostream& operator<<(std::ostream& os, const TypeOf& v);
std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
-#define DECLARE_INSTRUCTION_FACTORY_P0(I) \
- static I* New(Zone* zone, HValue* context) { \
- return new(zone) I(); \
-}
+#define DECLARE_INSTRUCTION_FACTORY_P0(I) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
+ return new (zone) I(); \
+ }
-#define DECLARE_INSTRUCTION_FACTORY_P1(I, P1) \
- static I* New(Zone* zone, HValue* context, P1 p1) { \
- return new(zone) I(p1); \
+#define DECLARE_INSTRUCTION_FACTORY_P1(I, P1) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \
+ return new (zone) I(p1); \
}
#define DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2) \
- static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \
- return new(zone) I(p1, p2); \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \
+ return new (zone) I(p1, p2); \
}
-#define DECLARE_INSTRUCTION_FACTORY_P3(I, P1, P2, P3) \
- static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) { \
- return new(zone) I(p1, p2, p3); \
+#define DECLARE_INSTRUCTION_FACTORY_P3(I, P1, P2, P3) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3) { \
+ return new (zone) I(p1, p2, p3); \
}
-#define DECLARE_INSTRUCTION_FACTORY_P4(I, P1, P2, P3, P4) \
- static I* New(Zone* zone, \
- HValue* context, \
- P1 p1, \
- P2 p2, \
- P3 p3, \
- P4 p4) { \
- return new(zone) I(p1, p2, p3, p4); \
+#define DECLARE_INSTRUCTION_FACTORY_P4(I, P1, P2, P3, P4) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4) { \
+ return new (zone) I(p1, p2, p3, p4); \
}
-#define DECLARE_INSTRUCTION_FACTORY_P5(I, P1, P2, P3, P4, P5) \
- static I* New(Zone* zone, \
- HValue* context, \
- P1 p1, \
- P2 p2, \
- P3 p3, \
- P4 p4, \
- P5 p5) { \
- return new(zone) I(p1, p2, p3, p4, p5); \
+#define DECLARE_INSTRUCTION_FACTORY_P5(I, P1, P2, P3, P4, P5) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4, P5 p5) { \
+ return new (zone) I(p1, p2, p3, p4, p5); \
}
-#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
- static I* New(Zone* zone, \
- HValue* context, \
- P1 p1, \
- P2 p2, \
- P3 p3, \
- P4 p4, \
- P5 p5, \
- P6 p6) { \
- return new(zone) I(p1, p2, p3, p4, p5, p6); \
+#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4, P5 p5, P6 p6) { \
+ return new (zone) I(p1, p2, p3, p4, p5, p6); \
}
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
- static I* New(Zone* zone, HValue* context) { \
- return new(zone) I(context); \
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
+ return new (zone) I(context); \
}
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \
- static I* New(Zone* zone, HValue* context, P1 p1) { \
- return new(zone) I(context, p1); \
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \
+ return new (zone) I(context, p1); \
}
#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2) \
- static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \
- return new(zone) I(context, p1, p2); \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \
+ return new (zone) I(context, p1, p2); \
}
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \
- static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) { \
- return new(zone) I(context, p1, p2, p3); \
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3) { \
+ return new (zone) I(context, p1, p2, p3); \
}
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \
- static I* New(Zone* zone, \
- HValue* context, \
- P1 p1, \
- P2 p2, \
- P3 p3, \
- P4 p4) { \
- return new(zone) I(context, p1, p2, p3, p4); \
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4) { \
+ return new (zone) I(context, p1, p2, p3, p4); \
}
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \
- static I* New(Zone* zone, \
- HValue* context, \
- P1 p1, \
- P2 p2, \
- P3 p3, \
- P4 p4, \
- P5 p5) { \
- return new(zone) I(context, p1, p2, p3, p4, p5); \
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4, P5 p5) { \
+ return new (zone) I(context, p1, p2, p3, p4, p5); \
}
@@ -1057,14 +978,14 @@ class HPositionInfo {
public:
explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
- HSourcePosition position() const {
+ SourcePosition position() const {
if (has_operand_positions()) {
return operand_positions()[kInstructionPosIndex];
}
- return HSourcePosition(static_cast<int>(UntagPosition(data_)));
+ return SourcePosition(static_cast<int>(UntagPosition(data_)));
}
- void set_position(HSourcePosition pos) {
+ void set_position(SourcePosition pos) {
if (has_operand_positions()) {
operand_positions()[kInstructionPosIndex] = pos;
} else {
@@ -1078,27 +999,26 @@ class HPositionInfo {
}
const int length = kFirstOperandPosIndex + operand_count;
- HSourcePosition* positions =
- zone->NewArray<HSourcePosition>(length);
+ SourcePosition* positions = zone->NewArray<SourcePosition>(length);
for (int i = 0; i < length; i++) {
- positions[i] = HSourcePosition::Unknown();
+ positions[i] = SourcePosition::Unknown();
}
- const HSourcePosition pos = position();
+ const SourcePosition pos = position();
data_ = reinterpret_cast<intptr_t>(positions);
set_position(pos);
DCHECK(has_operand_positions());
}
- HSourcePosition operand_position(int idx) const {
+ SourcePosition operand_position(int idx) const {
if (!has_operand_positions()) {
return position();
}
return *operand_position_slot(idx);
}
- void set_operand_position(int idx, HSourcePosition pos) {
+ void set_operand_position(int idx, SourcePosition pos) {
*operand_position_slot(idx) = pos;
}
@@ -1106,7 +1026,7 @@ class HPositionInfo {
static const intptr_t kInstructionPosIndex = 0;
static const intptr_t kFirstOperandPosIndex = 1;
- HSourcePosition* operand_position_slot(int idx) const {
+ SourcePosition* operand_position_slot(int idx) const {
DCHECK(has_operand_positions());
return &(operand_positions()[kFirstOperandPosIndex + idx]);
}
@@ -1115,9 +1035,9 @@ class HPositionInfo {
return !IsTaggedPosition(data_);
}
- HSourcePosition* operand_positions() const {
+ SourcePosition* operand_positions() const {
DCHECK(has_operand_positions());
- return reinterpret_cast<HSourcePosition*>(data_);
+ return reinterpret_cast<SourcePosition*>(data_);
}
static const intptr_t kPositionTag = 1;
@@ -1165,23 +1085,23 @@ class HInstruction : public HValue {
}
// The position is a write-once variable.
- HSourcePosition position() const OVERRIDE {
- return HSourcePosition(position_.position());
+ SourcePosition position() const OVERRIDE {
+ return SourcePosition(position_.position());
}
bool has_position() const {
return !position().IsUnknown();
}
- void set_position(HSourcePosition position) {
+ void set_position(SourcePosition position) {
DCHECK(!has_position());
DCHECK(!position.IsUnknown());
position_.set_position(position);
}
- HSourcePosition operand_position(int index) const OVERRIDE {
- const HSourcePosition pos = position_.operand_position(index);
+ SourcePosition operand_position(int index) const OVERRIDE {
+ const SourcePosition pos = position_.operand_position(index);
return pos.IsUnknown() ? position() : pos;
}
- void set_operand_position(Zone* zone, int index, HSourcePosition pos) {
+ void set_operand_position(Zone* zone, int index, SourcePosition pos) {
DCHECK(0 <= index && index < OperandCount());
position_.ensure_storage_for_operand_positions(zone, OperandCount());
position_.set_operand_position(index, pos);
@@ -1383,9 +1303,8 @@ class HGoto FINAL : public HTemplateControlInstruction<1, 0> {
class HDeoptimize FINAL : public HTemplateControlInstruction<1, 0> {
public:
- static HDeoptimize* New(Zone* zone,
- HValue* context,
- const char* reason,
+ static HDeoptimize* New(Isolate* isolate, Zone* zone, HValue* context,
+ Deoptimizer::DeoptReason reason,
Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation) {
return new(zone) HDeoptimize(reason, type, unreachable_continuation);
@@ -1400,20 +1319,20 @@ class HDeoptimize FINAL : public HTemplateControlInstruction<1, 0> {
return Representation::None();
}
- const char* reason() const { return reason_; }
+ Deoptimizer::DeoptReason reason() const { return reason_; }
Deoptimizer::BailoutType type() { return type_; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
private:
- explicit HDeoptimize(const char* reason,
+ explicit HDeoptimize(Deoptimizer::DeoptReason reason,
Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation)
: reason_(reason), type_(type) {
SetSuccessorAt(0, unreachable_continuation);
}
- const char* reason_;
+ Deoptimizer::DeoptReason reason_;
Deoptimizer::BailoutType type_;
};
@@ -1636,7 +1555,8 @@ class HUseConst FINAL : public HUnaryOperation {
class HForceRepresentation FINAL : public HTemplateInstruction<1> {
public:
- static HInstruction* New(Zone* zone, HValue* context, HValue* value,
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* value,
Representation required_representation);
HValue* value() const { return OperandAt(0); }
@@ -2017,8 +1937,8 @@ class HConstant;
class HEnterInlined FINAL : public HTemplateInstruction<0> {
public:
- static HEnterInlined* New(Zone* zone, HValue* context, BailoutId return_id,
- Handle<JSFunction> closure,
+ static HEnterInlined* New(Isolate* isolate, Zone* zone, HValue* context,
+ BailoutId return_id, Handle<JSFunction> closure,
HConstant* closure_context, int arguments_count,
FunctionLiteral* function,
InliningKind inlining_kind, Variable* arguments_var,
@@ -2106,31 +2026,33 @@ class HLeaveInlined FINAL : public HTemplateInstruction<0> {
class HPushArguments FINAL : public HInstruction {
public:
- static HPushArguments* New(Zone* zone, HValue* context) {
+ static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context) {
return new(zone) HPushArguments(zone);
}
- static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1) {
+ static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* arg1) {
HPushArguments* instr = new(zone) HPushArguments(zone);
instr->AddInput(arg1);
return instr;
}
- static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
- HValue* arg2) {
+ static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* arg1, HValue* arg2) {
HPushArguments* instr = new(zone) HPushArguments(zone);
instr->AddInput(arg1);
instr->AddInput(arg2);
return instr;
}
- static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
- HValue* arg2, HValue* arg3) {
+ static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* arg1, HValue* arg2, HValue* arg3) {
HPushArguments* instr = new(zone) HPushArguments(zone);
instr->AddInput(arg1);
instr->AddInput(arg2);
instr->AddInput(arg3);
return instr;
}
- static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
- HValue* arg2, HValue* arg3, HValue* arg4) {
+ static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* arg1, HValue* arg2, HValue* arg3,
+ HValue* arg4) {
HPushArguments* instr = new(zone) HPushArguments(zone);
instr->AddInput(arg1);
instr->AddInput(arg2);
@@ -2281,10 +2203,8 @@ class HBinaryCall : public HCall<2> {
class HCallJSFunction FINAL : public HCall<1> {
public:
- static HCallJSFunction* New(Zone* zone,
- HValue* context,
- HValue* function,
- int argument_count,
+ static HCallJSFunction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* function, int argument_count,
bool pass_argument_count);
HValue* function() const { return OperandAt(0); }
@@ -2324,8 +2244,8 @@ enum CallMode { NORMAL_CALL, TAIL_CALL };
class HCallWithDescriptor FINAL : public HInstruction {
public:
- static HCallWithDescriptor* New(Zone* zone, HValue* context, HValue* target,
- int argument_count,
+ static HCallWithDescriptor* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* target, int argument_count,
CallInterfaceDescriptor descriptor,
const Vector<HValue*>& operands,
CallMode call_mode = NORMAL_CALL) {
@@ -2414,15 +2334,16 @@ class HInvokeFunction FINAL : public HBinaryCall {
int argument_count)
: HBinaryCall(context, function, argument_count),
known_function_(known_function) {
- formal_parameter_count_ = known_function.is_null()
- ? 0 : known_function->shared()->formal_parameter_count();
+ formal_parameter_count_ =
+ known_function.is_null()
+ ? 0
+ : known_function->shared()->internal_formal_parameter_count();
has_stack_check_ = !known_function.is_null() &&
(known_function->code()->kind() == Code::FUNCTION ||
known_function->code()->kind() == Code::OPTIMIZED_FUNCTION);
}
- static HInvokeFunction* New(Zone* zone,
- HValue* context,
+ static HInvokeFunction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* function,
Handle<JSFunction> known_function,
int argument_count) {
@@ -2457,22 +2378,36 @@ class HCallFunction FINAL : public HBinaryCall {
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
HCallFunction, HValue*, int, CallFunctionFlags);
- HValue* context() { return first(); }
- HValue* function() { return second(); }
+ HValue* context() const { return first(); }
+ HValue* function() const { return second(); }
CallFunctionFlags function_flags() const { return function_flags_; }
+ FeedbackVectorICSlot slot() const { return slot_; }
+ Handle<TypeFeedbackVector> feedback_vector() const {
+ return feedback_vector_;
+ }
+ bool HasVectorAndSlot() const { return !feedback_vector_.is_null(); }
+ void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+ FeedbackVectorICSlot slot) {
+ feedback_vector_ = vector;
+ slot_ = slot;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+ std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT
+
int argument_delta() const OVERRIDE { return -argument_count(); }
private:
- HCallFunction(HValue* context,
- HValue* function,
- int argument_count,
+ HCallFunction(HValue* context, HValue* function, int argument_count,
CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS)
- : HBinaryCall(context, function, argument_count), function_flags_(flags) {
- }
+ : HBinaryCall(context, function, argument_count),
+ function_flags_(flags),
+ slot_(FeedbackVectorICSlot::Invalid()) {}
CallFunctionFlags function_flags_;
+ Handle<TypeFeedbackVector> feedback_vector_;
+ FeedbackVectorICSlot slot_;
};
@@ -2583,10 +2518,8 @@ class HMapEnumLength FINAL : public HUnaryOperation {
class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* value,
- BuiltinFunctionId op);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* value, BuiltinFunctionId op);
HValue* context() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
@@ -2727,12 +2660,13 @@ class HLoadRoot FINAL : public HTemplateInstruction<0> {
class HCheckMaps FINAL : public HTemplateInstruction<2> {
public:
- static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
- Handle<Map> map, HValue* typecheck = NULL) {
+ static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* value, Handle<Map> map,
+ HValue* typecheck = NULL) {
return new(zone) HCheckMaps(value, new(zone) UniqueSet<Map>(
Unique<Map>::CreateImmovable(map), zone), typecheck);
}
- static HCheckMaps* New(Zone* zone, HValue* context,
+ static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* value, SmallMapList* map_list,
HValue* typecheck = NULL) {
UniqueSet<Map>* maps = new(zone) UniqueSet<Map>(map_list->length(), zone);
@@ -2862,9 +2796,9 @@ class HCheckMaps FINAL : public HTemplateInstruction<2> {
class HCheckValue FINAL : public HUnaryOperation {
public:
- static HCheckValue* New(Zone* zone, HValue* context,
+ static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* value, Handle<JSFunction> func) {
- bool in_new_space = zone->isolate()->heap()->InNewSpace(*func);
+ bool in_new_space = isolate->heap()->InNewSpace(*func);
// NOTE: We create an uninitialized Unique and initialize it later.
// This is because a JSFunction can move due to GC during graph creation.
// TODO(titzer): This is a migration crutch. Replace with some kind of
@@ -2873,7 +2807,7 @@ class HCheckValue FINAL : public HUnaryOperation {
HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
return check;
}
- static HCheckValue* New(Zone* zone, HValue* context,
+ static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* value, Unique<HeapObject> target,
bool object_in_new_space) {
return new(zone) HCheckValue(value, target, object_in_new_space);
@@ -3304,7 +3238,7 @@ class HPhi FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
- HSourcePosition position() const OVERRIDE;
+ SourcePosition position() const OVERRIDE;
int merged_index() const { return merged_index_; }
@@ -3415,7 +3349,8 @@ class HDematerializedObject : public HInstruction {
class HArgumentsObject FINAL : public HDematerializedObject {
public:
- static HArgumentsObject* New(Zone* zone, HValue* context, int count) {
+ static HArgumentsObject* New(Isolate* isolate, Zone* zone, HValue* context,
+ int count) {
return new(zone) HArgumentsObject(count, zone);
}
@@ -3483,19 +3418,21 @@ class HCapturedObject FINAL : public HDematerializedObject {
class HConstant FINAL : public HTemplateInstruction<0> {
public:
+ enum Special { kHoleNaN };
+
+ DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Special);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
- static HConstant* CreateAndInsertAfter(Zone* zone,
- HValue* context,
- int32_t value,
+ static HConstant* CreateAndInsertAfter(Isolate* isolate, Zone* zone,
+ HValue* context, int32_t value,
Representation representation,
HInstruction* instruction) {
- return instruction->Append(HConstant::New(
- zone, context, value, representation));
+ return instruction->Append(
+ HConstant::New(isolate, zone, context, value, representation));
}
Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
@@ -3506,13 +3443,12 @@ class HConstant FINAL : public HTemplateInstruction<0> {
return Handle<Map>();
}
- static HConstant* CreateAndInsertBefore(Zone* zone,
- HValue* context,
- int32_t value,
+ static HConstant* CreateAndInsertBefore(Isolate* isolate, Zone* zone,
+ HValue* context, int32_t value,
Representation representation,
HInstruction* instruction) {
- return instruction->Prepend(HConstant::New(
- zone, context, value, representation));
+ return instruction->Prepend(
+ HConstant::New(isolate, zone, context, value, representation));
}
static HConstant* CreateAndInsertBefore(Zone* zone,
@@ -3550,7 +3486,6 @@ class HConstant FINAL : public HTemplateInstruction<0> {
bool IsSpecialDouble() const {
return HasDoubleValue() &&
(bit_cast<int64_t>(double_value_) == bit_cast<int64_t>(-0.0) ||
- FixedDoubleArray::is_the_hole_nan(double_value_) ||
std::isnan(double_value_));
}
@@ -3581,7 +3516,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
- Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
+ Maybe<HConstant*> CopyToTruncatedNumber(Isolate* isolate, Zone* zone);
bool HasInteger32Value() const {
return HasInt32ValueField::decode(bit_field_);
}
@@ -3597,8 +3532,15 @@ class HConstant FINAL : public HTemplateInstruction<0> {
DCHECK(HasDoubleValue());
return double_value_;
}
+ uint64_t DoubleValueAsBits() const {
+ uint64_t bits;
+ DCHECK(HasDoubleValue());
+ STATIC_ASSERT(sizeof(bits) == sizeof(double_value_));
+ std::memcpy(&bits, &double_value_, sizeof(bits));
+ return bits;
+ }
bool IsTheHole() const {
- if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
+ if (HasDoubleValue() && DoubleValueAsBits() == kHoleNanInt64) {
return true;
}
return object_.IsInitialized() &&
@@ -3661,7 +3603,11 @@ class HConstant FINAL : public HTemplateInstruction<0> {
if (HasInteger32Value()) {
return static_cast<intptr_t>(int32_value_);
} else if (HasDoubleValue()) {
- return static_cast<intptr_t>(bit_cast<int64_t>(double_value_));
+ uint64_t bits = DoubleValueAsBits();
+ if (sizeof(bits) > sizeof(intptr_t)) {
+ bits ^= (bits >> 32);
+ }
+ return static_cast<intptr_t>(bits);
} else if (HasExternalReferenceValue()) {
return reinterpret_cast<intptr_t>(external_reference_value_.address());
} else {
@@ -3692,8 +3638,8 @@ class HConstant FINAL : public HTemplateInstruction<0> {
int32_value_ == other_constant->int32_value_;
} else if (HasDoubleValue()) {
return other_constant->HasDoubleValue() &&
- bit_cast<int64_t>(double_value_) ==
- bit_cast<int64_t>(other_constant->double_value_);
+ std::memcmp(&double_value_, &other_constant->double_value_,
+ sizeof(double_value_)) == 0;
} else if (HasExternalReferenceValue()) {
return other_constant->HasExternalReferenceValue() &&
external_reference_value_ ==
@@ -3720,6 +3666,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
private:
friend class HGraph;
+ explicit HConstant(Special special);
explicit HConstant(Handle<Object> handle,
Representation r = Representation::None());
HConstant(int32_t value,
@@ -3860,9 +3807,8 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
- void SetOperandPositions(Zone* zone,
- HSourcePosition left_pos,
- HSourcePosition right_pos) {
+ void SetOperandPositions(Zone* zone, SourcePosition left_pos,
+ SourcePosition right_pos) {
set_operand_position(zone, 1, left_pos);
set_operand_position(zone, 2, right_pos);
}
@@ -4320,9 +4266,8 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT
- void SetOperandPositions(Zone* zone,
- HSourcePosition left_pos,
- HSourcePosition right_pos) {
+ void SetOperandPositions(Zone* zone, SourcePosition left_pos,
+ SourcePosition right_pos) {
set_operand_position(zone, 0, left_pos);
set_operand_position(zone, 1, right_pos);
}
@@ -4780,10 +4725,8 @@ class HInstanceOfKnownGlobal FINAL : public HTemplateInstruction<2> {
class HPower FINAL : public HTemplateInstruction<2> {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
HValue* left() { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
@@ -4819,10 +4762,8 @@ class HPower FINAL : public HTemplateInstruction<2> {
class HAdd FINAL : public HArithmeticBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
@@ -4882,10 +4823,8 @@ class HAdd FINAL : public HArithmeticBinaryOperation {
class HSub FINAL : public HArithmeticBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
HValue* Canonicalize() OVERRIDE;
@@ -4915,16 +4854,12 @@ class HSub FINAL : public HArithmeticBinaryOperation {
class HMul FINAL : public HArithmeticBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
- static HInstruction* NewImul(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- HInstruction* instr = HMul::New(zone, context, left, right);
+ static HInstruction* NewImul(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right) {
+ HInstruction* instr = HMul::New(isolate, zone, context, left, right);
if (!instr->IsMul()) return instr;
HMul* mul = HMul::cast(instr);
// TODO(mstarzinger): Prevent bailout on minus zero for imul.
@@ -4963,10 +4898,8 @@ class HMul FINAL : public HArithmeticBinaryOperation {
class HMod FINAL : public HArithmeticBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
HValue* Canonicalize() OVERRIDE;
@@ -4997,10 +4930,8 @@ class HMod FINAL : public HArithmeticBinaryOperation {
class HDiv FINAL : public HArithmeticBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
HValue* Canonicalize() OVERRIDE;
@@ -5031,11 +4962,8 @@ class HMathMinMax FINAL : public HArithmeticBinaryOperation {
public:
enum Operation { kMathMin, kMathMax };
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right,
- Operation op);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right, Operation op);
Representation observed_input_representation(int index) OVERRIDE {
return RequiredInputRepresentation(index);
@@ -5079,11 +5007,8 @@ class HMathMinMax FINAL : public HArithmeticBinaryOperation {
class HBitwise FINAL : public HBitwiseBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- Token::Value op,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ Token::Value op, HValue* left, HValue* right);
Token::Value op() const { return op_; }
@@ -5141,10 +5066,8 @@ class HBitwise FINAL : public HBitwiseBinaryOperation {
class HShl FINAL : public HBitwiseBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
Range* InferRange(Zone* zone) OVERRIDE;
@@ -5172,10 +5095,8 @@ class HShl FINAL : public HBitwiseBinaryOperation {
class HShr FINAL : public HBitwiseBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
if (right()->IsInteger32Constant()) {
@@ -5211,10 +5132,8 @@ class HShr FINAL : public HBitwiseBinaryOperation {
class HSar FINAL : public HBitwiseBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right);
bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
if (right()->IsInteger32Constant()) {
@@ -5250,10 +5169,8 @@ class HSar FINAL : public HBitwiseBinaryOperation {
class HRor FINAL : public HBitwiseBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right) {
return new(zone) HRor(context, left, right);
}
@@ -5589,14 +5506,10 @@ class HAllocate FINAL : public HTemplateInstruction<2> {
ComputeFlags(NOT_TENURED, type1) == ComputeFlags(NOT_TENURED, type2);
}
- static HAllocate* New(Zone* zone,
- HValue* context,
- HValue* size,
- HType type,
- PretenureFlag pretenure_flag,
- InstanceType instance_type,
- Handle<AllocationSite> allocation_site =
- Handle<AllocationSite>::null()) {
+ static HAllocate* New(
+ Isolate* isolate, Zone* zone, HValue* context, HValue* size, HType type,
+ PretenureFlag pretenure_flag, InstanceType instance_type,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null()) {
return new(zone) HAllocate(context, size, type, pretenure_flag,
instance_type, allocation_site);
}
@@ -5771,10 +5684,8 @@ class HAllocate FINAL : public HTemplateInstruction<2> {
class HStoreCodeEntry FINAL: public HTemplateInstruction<2> {
public:
- static HStoreCodeEntry* New(Zone* zone,
- HValue* context,
- HValue* function,
- HValue* code) {
+ static HStoreCodeEntry* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* function, HValue* code) {
return new(zone) HStoreCodeEntry(function, code);
}
@@ -5797,11 +5708,9 @@ class HStoreCodeEntry FINAL: public HTemplateInstruction<2> {
class HInnerAllocatedObject FINAL : public HTemplateInstruction<2> {
public:
- static HInnerAllocatedObject* New(Zone* zone,
- HValue* context,
- HValue* value,
- HValue* offset,
- HType type) {
+ static HInnerAllocatedObject* New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* value,
+ HValue* offset, HType type) {
return new(zone) HInnerAllocatedObject(value, offset, type);
}
@@ -6260,6 +6169,10 @@ class HObjectAccess FINAL {
return HObjectAccess(kInobject, WeakCell::kValueOffset);
}
+ static HObjectAccess ForWeakCellNext() {
+ return HObjectAccess(kInobject, WeakCell::kNextOffset);
+ }
+
static HObjectAccess ForAllocationMementoSite() {
return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset);
}
@@ -7002,6 +6915,14 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
SetChangesFlag(kMaps);
}
+ void MarkReceiverAsCell() {
+ bit_field_ = ReceiverIsCellField::update(bit_field_, true);
+ }
+
+ bool receiver_is_cell() const {
+ return ReceiverIsCellField::decode(bit_field_);
+ }
+
bool NeedsWriteBarrier() const {
DCHECK(!field_representation().IsDouble() ||
(FLAG_unbox_double_fields && access_.IsInobject()) ||
@@ -7010,6 +6931,7 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
if (field_representation().IsSmi()) return false;
if (field_representation().IsInteger32()) return false;
if (field_representation().IsExternal()) return false;
+ if (receiver_is_cell()) return false;
return StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), value(), dominator());
}
@@ -7069,6 +6991,7 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
class HasTransitionField : public BitField<bool, 0, 1> {};
class StoreModeField : public BitField<StoreFieldOrKeyedMode, 1, 1> {};
+ class ReceiverIsCellField : public BitField<bool, 2, 1> {};
HObjectAccess access_;
HValue* dominator_;
@@ -7080,12 +7003,12 @@ class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
Handle<String>, HValue*,
- StrictMode);
+ LanguageMode);
HValue* object() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
Handle<String> name() const { return name_; }
- StrictMode strict_mode() const { return strict_mode_; }
+ LanguageMode language_mode() const { return language_mode_; }
std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT
@@ -7096,13 +7019,9 @@ class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
private:
- HStoreNamedGeneric(HValue* context,
- HValue* object,
- Handle<String> name,
- HValue* value,
- StrictMode strict_mode)
- : name_(name),
- strict_mode_(strict_mode) {
+ HStoreNamedGeneric(HValue* context, HValue* object, Handle<String> name,
+ HValue* value, LanguageMode language_mode)
+ : name_(name), language_mode_(language_mode) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -7110,7 +7029,7 @@ class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
}
Handle<String> name_;
- StrictMode strict_mode_;
+ LanguageMode language_mode_;
};
@@ -7301,13 +7220,13 @@ class HStoreKeyed FINAL
class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
- HValue*, HValue*, StrictMode);
+ HValue*, HValue*, LanguageMode);
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* value() const { return OperandAt(2); }
HValue* context() const { return OperandAt(3); }
- StrictMode strict_mode() const { return strict_mode_; }
+ LanguageMode language_mode() const { return language_mode_; }
Representation RequiredInputRepresentation(int index) OVERRIDE {
// tagged[tagged] = tagged
@@ -7319,12 +7238,9 @@ class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
private:
- HStoreKeyedGeneric(HValue* context,
- HValue* object,
- HValue* key,
- HValue* value,
- StrictMode strict_mode)
- : strict_mode_(strict_mode) {
+ HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
+ HValue* value, LanguageMode language_mode)
+ : language_mode_(language_mode) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -7332,15 +7248,14 @@ class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
SetAllSideEffects();
}
- StrictMode strict_mode_;
+ LanguageMode language_mode_;
};
class HTransitionElementsKind FINAL : public HTemplateInstruction<2> {
public:
- inline static HTransitionElementsKind* New(Zone* zone,
- HValue* context,
- HValue* object,
+ inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone,
+ HValue* context, HValue* object,
Handle<Map> original_map,
Handle<Map> transitioned_map) {
return new(zone) HTransitionElementsKind(context, object,
@@ -7409,14 +7324,11 @@ class HTransitionElementsKind FINAL : public HTemplateInstruction<2> {
class HStringAdd FINAL : public HBinaryOperation {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right,
- PretenureFlag pretenure_flag = NOT_TENURED,
- StringAddFlags flags = STRING_ADD_CHECK_BOTH,
- Handle<AllocationSite> allocation_site =
- Handle<AllocationSite>::null());
+ static HInstruction* New(
+ Isolate* isolate, Zone* zone, HValue* context, HValue* left,
+ HValue* right, PretenureFlag pretenure_flag = NOT_TENURED,
+ StringAddFlags flags = STRING_ADD_CHECK_BOTH,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
StringAddFlags flags() const { return flags_; }
PretenureFlag pretenure_flag() const { return pretenure_flag_; }
@@ -7510,8 +7422,7 @@ class HStringCharCodeAt FINAL : public HTemplateInstruction<3> {
class HStringCharFromCode FINAL : public HTemplateInstruction<2> {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* char_code);
Representation RequiredInputRepresentation(int index) OVERRIDE {
@@ -7630,12 +7541,10 @@ class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
bool has_no_literals() const {
return HasNoLiteralsField::decode(bit_field_);
}
- bool is_arrow() const { return IsArrowFunction(kind()); }
- bool is_generator() const { return IsGeneratorFunction(kind()); }
- bool is_concise_method() const { return IsConciseMethod(kind()); }
- bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
FunctionKind kind() const { return FunctionKindField::decode(bit_field_); }
- StrictMode strict_mode() const { return StrictModeField::decode(bit_field_); }
+ LanguageMode language_mode() const {
+ return LanguageModeField::decode(bit_field_);
+ }
private:
HFunctionLiteral(HValue* context, Handle<SharedFunctionInfo> shared,
@@ -7645,7 +7554,7 @@ class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
bit_field_(FunctionKindField::encode(shared->kind()) |
PretenureField::encode(pretenure) |
HasNoLiteralsField::encode(shared->num_literals() == 0) |
- StrictModeField::encode(shared->strict_mode())) {
+ LanguageModeField::encode(shared->language_mode())) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
SetChangesFlag(kNewSpacePromotion);
@@ -7653,10 +7562,11 @@ class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
bool IsDeletable() const OVERRIDE { return true; }
- class FunctionKindField : public BitField<FunctionKind, 0, 4> {};
- class PretenureField : public BitField<bool, 5, 1> {};
- class HasNoLiteralsField : public BitField<bool, 6, 1> {};
- class StrictModeField : public BitField<StrictMode, 7, 1> {};
+ class FunctionKindField : public BitField<FunctionKind, 0, 6> {};
+ class PretenureField : public BitField<bool, 6, 1> {};
+ class HasNoLiteralsField : public BitField<bool, 7, 1> {};
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ class LanguageModeField : public BitField<LanguageMode, 8, 2> {};
Handle<SharedFunctionInfo> shared_info_;
uint32_t bit_field_;
@@ -7761,10 +7671,8 @@ class HDateField FINAL : public HUnaryOperation {
class HSeqStringGetChar FINAL : public HTemplateInstruction<2> {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- String::Encoding encoding,
- HValue* string,
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ String::Encoding encoding, HValue* string,
HValue* index);
Representation RequiredInputRepresentation(int index) OVERRIDE {
diff --git a/deps/v8/src/hydrogen-types.cc b/deps/v8/src/hydrogen-types.cc
index a05e30fbe3..3bac69bb56 100644
--- a/deps/v8/src/hydrogen-types.cc
+++ b/deps/v8/src/hydrogen-types.cc
@@ -15,7 +15,7 @@ namespace internal {
template <class T>
HType HType::FromType(typename T::TypeHandle type) {
if (T::Any()->Is(type)) return HType::Any();
- if (type->Is(T::None())) return HType::None();
+ if (!type->IsInhabited()) return HType::None();
if (type->Is(T::SignedSmall())) return HType::Smi();
if (type->Is(T::Number())) return HType::TaggedNumber();
if (type->Is(T::Null())) return HType::Null();
@@ -24,6 +24,7 @@ HType HType::FromType(typename T::TypeHandle type) {
if (type->Is(T::Undefined())) return HType::Undefined();
if (type->Is(T::Array())) return HType::JSArray();
if (type->Is(T::Object())) return HType::JSObject();
+ if (type->Is(T::Receiver())) return HType::JSReceiver();
return HType::Tagged();
}
diff --git a/deps/v8/src/hydrogen-types.h b/deps/v8/src/hydrogen-types.h
index 70870dd90a..773a18ca7e 100644
--- a/deps/v8/src/hydrogen-types.h
+++ b/deps/v8/src/hydrogen-types.h
@@ -17,22 +17,23 @@ namespace internal {
template <typename T> class Handle;
class Object;
-#define HTYPE_LIST(V) \
- V(Any, 0x0) /* 0000 0000 0000 0000 */ \
- V(Tagged, 0x1) /* 0000 0000 0000 0001 */ \
- V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \
- V(TaggedNumber, 0xd) /* 0000 0000 0000 1101 */ \
- V(Smi, 0x1d) /* 0000 0000 0001 1101 */ \
- V(HeapObject, 0x21) /* 0000 0000 0010 0001 */ \
- V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \
- V(Null, 0x27) /* 0000 0000 0010 0111 */ \
- V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \
- V(String, 0x65) /* 0000 0000 0110 0101 */ \
- V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \
- V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \
- V(JSObject, 0x221) /* 0000 0010 0010 0001 */ \
- V(JSArray, 0x621) /* 0000 0110 0010 0001 */ \
- V(None, 0x7ff) /* 0000 0111 1111 1111 */
+#define HTYPE_LIST(V) \
+ V(Any, 0x0) /* 0000 0000 0000 0000 */ \
+ V(Tagged, 0x1) /* 0000 0000 0000 0001 */ \
+ V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \
+ V(TaggedNumber, 0xd) /* 0000 0000 0000 1101 */ \
+ V(Smi, 0x1d) /* 0000 0000 0001 1101 */ \
+ V(HeapObject, 0x21) /* 0000 0000 0010 0001 */ \
+ V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \
+ V(Null, 0x27) /* 0000 0000 0010 0111 */ \
+ V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \
+ V(String, 0x65) /* 0000 0000 0110 0101 */ \
+ V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \
+ V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \
+ V(JSReceiver, 0x221) /* 0000 0010 0010 0001 */ \
+ V(JSObject, 0x621) /* 0000 0110 0010 0001 */ \
+ V(JSArray, 0xe21) /* 0000 1110 0010 0001 */ \
+ V(None, 0xfff) /* 0000 1111 1111 1111 */
class HType FINAL {
public:
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index e8fa84f264..d73a3ebac4 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -51,6 +51,8 @@
#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@@ -127,8 +129,7 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr,
- HSourcePosition position) {
+void HBasicBlock::AddInstruction(HInstruction* instr, SourcePosition position) {
DCHECK(!IsStartBlock() || !IsFinished());
DCHECK(!instr->IsLinked());
DCHECK(!IsFinished());
@@ -197,7 +198,7 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
+void HBasicBlock::Finish(HControlInstruction* end, SourcePosition position) {
DCHECK(!IsFinished());
AddInstruction(end, position);
end_ = end;
@@ -207,10 +208,8 @@ void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
}
-void HBasicBlock::Goto(HBasicBlock* block,
- HSourcePosition position,
- FunctionState* state,
- bool add_simulate) {
+void HBasicBlock::Goto(HBasicBlock* block, SourcePosition position,
+ FunctionState* state, bool add_simulate) {
bool drop_extra = state != NULL &&
state->inlining_kind() == NORMAL_RETURN;
@@ -229,9 +228,8 @@ void HBasicBlock::Goto(HBasicBlock* block,
}
-void HBasicBlock::AddLeaveInlined(HValue* return_value,
- FunctionState* state,
- HSourcePosition position) {
+void HBasicBlock::AddLeaveInlined(HValue* return_value, FunctionState* state,
+ SourcePosition position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
@@ -651,7 +649,7 @@ HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
if (!pointer->is_set()) {
// Can't pass GetInvalidContext() to HConstant::New, because that will
// recursively call GetConstant
- HConstant* constant = HConstant::New(zone(), NULL, value);
+ HConstant* constant = HConstant::New(isolate(), zone(), NULL, value);
constant->InsertAfter(entry_block()->first());
pointer->set(constant);
return constant;
@@ -929,7 +927,7 @@ void HGraphBuilder::IfBuilder::Else() {
}
-void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
+void HGraphBuilder::IfBuilder::Deopt(Deoptimizer::DeoptReason reason) {
DCHECK(did_then_);
builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
AddMergeAtJoinBlock(true);
@@ -1025,8 +1023,9 @@ void HGraphBuilder::IfBuilder::End() {
current = merge_at_join_blocks_;
while (current != NULL) {
if (current->deopt_ && current->block_ != NULL) {
- current->block_->FinishExit(HAbnormalExit::New(builder()->zone(), NULL),
- HSourcePosition::Unknown());
+ current->block_->FinishExit(
+ HAbnormalExit::New(builder()->isolate(), builder()->zone(), NULL),
+ SourcePosition::Unknown());
}
current = current->next_;
}
@@ -1096,11 +1095,12 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
builder_->set_current_block(body_block_);
if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
+ Isolate* isolate = builder_->isolate();
HValue* one = builder_->graph()->GetConstant1();
if (direction_ == kPreIncrement) {
- increment_ = HAdd::New(zone(), context_, phi_, one);
+ increment_ = HAdd::New(isolate, zone(), context_, phi_, one);
} else {
- increment_ = HSub::New(zone(), context_, phi_, one);
+ increment_ = HSub::New(isolate, zone(), context_, phi_, one);
}
increment_->ClearFlag(HValue::kCanOverflow);
builder_->AddInstruction(increment_);
@@ -1142,10 +1142,13 @@ void HGraphBuilder::LoopBuilder::EndBody() {
DCHECK(!finished_);
if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
+ Isolate* isolate = builder_->isolate();
if (direction_ == kPostIncrement) {
- increment_ = HAdd::New(zone(), context_, phi_, increment_amount_);
+ increment_ =
+ HAdd::New(isolate, zone(), context_, phi_, increment_amount_);
} else {
- increment_ = HSub::New(zone(), context_, phi_, increment_amount_);
+ increment_ =
+ HSub::New(isolate, zone(), context_, phi_, increment_amount_);
}
increment_->ClearFlag(HValue::kCanOverflow);
builder_->AddInstruction(increment_);
@@ -1266,7 +1269,8 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
}
-void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
+void HGraphBuilder::FinishExitWithHardDeoptimization(
+ Deoptimizer::DeoptReason reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
FinishExitCurrentBlock(New<HAbnormalExit>());
}
@@ -1290,7 +1294,7 @@ HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
Handle<JSFunction> f = Handle<JSFunction>::cast(
HConstant::cast(function)->handle(isolate()));
SharedFunctionInfo* shared = f->shared();
- if (shared->strict_mode() == STRICT || shared->native()) return object;
+ if (is_strict(shared->language_mode()) || shared->native()) return object;
}
return Add<HWrapReceiver>(object, function);
}
@@ -1582,7 +1586,7 @@ void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
if_global_object.If<HCompareNumericAndBranch>(instance_type,
min_global_type,
Token::GTE);
- if_global_object.ThenDeopt("receiver was a global object");
+ if_global_object.ThenDeopt(Deoptimizer::kReceiverWasAGlobalObject);
if_global_object.End();
}
@@ -1912,7 +1916,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
if_objectissmi.Else();
{
if (type->Is(Type::SignedSmall())) {
- if_objectissmi.Deopt("Expected smi");
+ if_objectissmi.Deopt(Deoptimizer::kExpectedSmi);
} else {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
@@ -1967,7 +1971,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
if_objectisnumber.Else();
{
if (type->Is(Type::Number())) {
- if_objectisnumber.Deopt("Expected heap number");
+ if_objectisnumber.Deopt(Deoptimizer::kExpectedHeapNumber);
}
}
if_objectisnumber.JoinContinuation(&found);
@@ -2440,7 +2444,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
negative_checker.Then();
HInstruction* result = AddElementAccess(
backing_store, key, val, bounds_check, elements_kind, access_type);
- negative_checker.ElseDeopt("Negative key encountered");
+ negative_checker.ElseDeopt(Deoptimizer::kNegativeKeyEncountered);
negative_checker.End();
length_checker.End();
return result;
@@ -2525,7 +2529,8 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
if (!(top_info()->IsStub()) &&
IsFastPackedElementsKind(array_builder->kind())) {
// We'll come back later with better (holey) feedback.
- if_builder.Deopt("Holey array despite packed elements_kind feedback");
+ if_builder.Deopt(
+ Deoptimizer::kHoleyArrayDespitePackedElements_kindFeedback);
} else {
Push(checked_length); // capacity
Push(checked_length); // length
@@ -2546,9 +2551,9 @@ HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind,
: kPointerSize;
HConstant* elements_size_value = Add<HConstant>(elements_size);
- HInstruction* mul = HMul::NewImul(zone(), context(),
- capacity->ActualValue(),
- elements_size_value);
+ HInstruction* mul =
+ HMul::NewImul(isolate(), zone(), context(), capacity->ActualValue(),
+ elements_size_value);
AddInstruction(mul);
mul->ClearFlag(HValue::kCanOverflow);
@@ -2799,12 +2804,10 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
HValue* to) {
// Fast elements kinds need to be initialized in case statements below cause a
// garbage collection.
- Factory* factory = isolate()->factory();
- double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
- ? Add<HConstant>(factory->the_hole_value())
- : Add<HConstant>(nan_double);
+ ? graph()->GetConstantHole()
+ : Add<HConstant>(HConstant::kHoleNaN);
// Since we're about to store a hole value, the store instruction below must
// assume an elements kind that supports heap object values.
@@ -2902,8 +2905,8 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
if_hole.If<HCompareHoleAndBranch>(element);
if_hole.Then();
HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
- ? Add<HConstant>(FixedDoubleArray::hole_nan_as_double())
- : graph()->GetConstantHole();
+ ? Add<HConstant>(HConstant::kHoleNaN)
+ : graph()->GetConstantHole();
Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
if_hole.Else();
HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
@@ -3066,14 +3069,14 @@ void HGraphBuilder::BuildCompareNil(HValue* value, Type* type,
Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
IfBuilder map_check(this);
map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt("Unknown map");
+ map_check.ThenDeopt(Deoptimizer::kUnknownMap);
map_check.End();
} else {
DCHECK(map_embedding == kEmbedMapsDirectly);
Add<HCheckMaps>(value, type->Classes().Current());
}
} else {
- if_nil.Deopt("Too many undetectable types");
+ if_nil.Deopt(Deoptimizer::kTooManyUndetectableTypes);
}
}
@@ -3344,7 +3347,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_ = &initial_function_state_;
- InitializeAstVisitor(info->zone());
+ InitializeAstVisitor(info->isolate(), info->zone());
if (FLAG_hydrogen_track_positions) {
SetSourcePosition(info->shared_info()->start_position());
}
@@ -3415,7 +3418,7 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
void HBasicBlock::FinishExit(HControlInstruction* instruction,
- HSourcePosition position) {
+ SourcePosition position) {
Finish(instruction, position);
ClearEnvironment();
}
@@ -3443,17 +3446,14 @@ HGraph::HGraph(CompilationInfo* info)
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false),
- inlined_functions_(FLAG_hydrogen_track_positions ? 5 : 0, info->zone()),
- inlining_id_to_function_id_(FLAG_hydrogen_track_positions ? 5 : 0,
- info->zone()) {
+ disallow_adding_new_values_(false) {
if (info->IsStub()) {
CallInterfaceDescriptor descriptor =
info->code_stub()->GetCallInterfaceDescriptor();
start_environment_ = new (zone_)
HEnvironment(zone_, descriptor.GetEnvironmentParameterCount());
} else {
- TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
+ info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown());
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -3481,71 +3481,14 @@ void HGraph::FinalizeUniqueness() {
}
-int HGraph::TraceInlinedFunction(
- Handle<SharedFunctionInfo> shared,
- HSourcePosition position) {
- if (!FLAG_hydrogen_track_positions) {
- return 0;
- }
-
- int id = 0;
- for (; id < inlined_functions_.length(); id++) {
- if (inlined_functions_[id].shared().is_identical_to(shared)) {
- break;
- }
- }
-
- if (id == inlined_functions_.length()) {
- inlined_functions_.Add(InlinedFunctionInfo(shared), zone());
-
- if (!shared->script()->IsUndefined()) {
- Handle<Script> script(Script::cast(shared->script()));
- if (!script->source()->IsUndefined()) {
- CodeTracer::Scope tracing_scopex(isolate()->GetCodeTracer());
- OFStream os(tracing_scopex.file());
- os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
- << ") id{" << info()->optimization_id() << "," << id << "} ---\n";
- {
- StringCharacterStream stream(String::cast(script->source()),
- shared->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- shared->end_position() - shared->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.HasMore()) {
- os << AsReversiblyEscapedUC16(stream.GetNext());
- }
- }
- }
-
- os << "\n--- END ---\n";
- }
- }
- }
-
- int inline_id = inlining_id_to_function_id_.length();
- inlining_id_to_function_id_.Add(id, zone());
-
- if (inline_id != 0) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
- << info()->optimization_id() << "," << id << "} AS " << inline_id
- << " AT " << position << std::endl;
- }
-
- return inline_id;
-}
-
-
-int HGraph::SourcePositionToScriptPosition(HSourcePosition pos) {
+int HGraph::SourcePositionToScriptPosition(SourcePosition pos) {
if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
return pos.raw();
}
- const int id = inlining_id_to_function_id_[pos.inlining_id()];
- return inlined_functions_[id].start_position() + pos.position();
+ const int id = info()->inlining_id_to_function_id()->at(pos.inlining_id());
+ return info()->inlined_function_infos()->at(id).start_position() +
+ pos.position();
}
@@ -3932,8 +3875,7 @@ void HGraph::CollectPhis() {
// Implementation of utility class to encapsulate the translation state for
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
- CompilationInfo* info,
- InliningKind inlining_kind,
+ CompilationInfo* info, InliningKind inlining_kind,
int inlining_id)
: owner_(owner),
compilation_info_(info),
@@ -3945,7 +3887,7 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
arguments_object_(NULL),
arguments_elements_(NULL),
inlining_id_(inlining_id),
- outer_source_position_(HSourcePosition::Unknown()),
+ outer_source_position_(SourcePosition::Unknown()),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -4277,6 +4219,11 @@ void HOptimizedGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs,
bool HOptimizedGraphBuilder::BuildGraph() {
+ if (IsSubclassConstructor(current_info()->function()->kind())) {
+ Bailout(kSuperReference);
+ return false;
+ }
+
Scope* scope = current_info()->scope();
SetUpScope(scope);
@@ -4516,11 +4463,12 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
environment()->Bind(scope->arguments(),
graph()->GetArgumentsObject());
}
-}
-
-Type* HOptimizedGraphBuilder::ToType(Handle<Map> map) {
- return IC::MapToType<Type>(map, zone());
+ int rest_index;
+ Variable* rest = scope->rest_parameter(&rest_index);
+ if (rest) {
+ return Bailout(kRestParameter);
+ }
}
@@ -4558,7 +4506,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
AddInstruction(function);
// Allocate a block context and store it to the stack frame.
HInstruction* inner_context = Add<HAllocateBlockContext>(
- outer_context, function, scope->GetScopeInfo());
+ outer_context, function, scope->GetScopeInfo(isolate()));
HInstruction* instr = Add<HStoreFrameContext>(inner_context);
set_scope(scope);
environment()->BindContext(inner_context);
@@ -5378,6 +5326,14 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
&lookup)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup.context_index);
+ Handle<Object> current_value =
+ FixedArray::get(script_context, lookup.context_index);
+
+ // If the values is not the hole, it will stay initialized,
+ // so no need to generate a check.
+ if (*current_value == *isolate()->factory()->the_hole_value()) {
+ return Bailout(kReferenceToUninitializedVariable);
+ }
HInstruction* result = New<HLoadNamedField>(
Add<HConstant>(script_context), nullptr,
HObjectAccess::ForContextSlot(lookup.slot_index));
@@ -5484,10 +5440,9 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-static bool CanInlinePropertyAccess(Type* type) {
- if (type->Is(Type::NumberOrString())) return true;
- if (!type->IsClass()) return false;
- Handle<Map> map = type->AsClass()->Map();
+static bool CanInlinePropertyAccess(Handle<Map> map) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
+ if (map->instance_type() < FIRST_NONSTRING_TYPE) return true;
return map->IsJSObjectMap() &&
!map->is_dictionary_map() &&
!map->has_named_interceptor();
@@ -5541,7 +5496,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
int limit = boilerplate->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
+ if (details.type() != DATA) continue;
if ((*max_properties)-- == 0) return false;
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
@@ -5565,6 +5520,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
+
expr->BuildConstantProperties(isolate());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
HInstruction* literal;
@@ -5620,9 +5576,10 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->is_computed_name()) return Bailout(kComputedPropertyName);
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
switch (property->kind()) {
@@ -5648,17 +5605,17 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
Handle<Map> map = property->GetReceiverType();
- Handle<String> name = property->key()->AsPropertyName();
+ Handle<String> name = key->AsPropertyName();
HInstruction* store;
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildNamedGeneric(
STORE, NULL, literal, name, value));
} else {
- PropertyAccessInfo info(this, STORE, ToType(map), name);
+ PropertyAccessInfo info(this, STORE, map, name);
if (info.CanAccessMonomorphic()) {
HValue* checked_literal = Add<HCheckMaps>(literal, map);
- DCHECK(!info.IsAccessor());
+ DCHECK(!info.IsAccessorConstant());
store = BuildMonomorphicAccess(
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
@@ -5944,20 +5901,16 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
PropertyAccessInfo* info) {
- if (!CanInlinePropertyAccess(type_)) return false;
+ if (!CanInlinePropertyAccess(map_)) return false;
// Currently only handle Type::Number as a polymorphic case.
// TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
// instruction.
- if (type_->Is(Type::Number())) return false;
+ if (IsNumberType()) return false;
// Values are only compatible for monomorphic load if they all behave the same
// regarding value wrappers.
- if (type_->Is(Type::NumberOrString())) {
- if (!info->type_->Is(Type::NumberOrString())) return false;
- } else {
- if (info->type_->Is(Type::NumberOrString())) return false;
- }
+ if (IsValueWrapped() != info->IsValueWrapped()) return false;
if (!LookupDescriptor()) return false;
@@ -5970,17 +5923,17 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
// chain.
if (info->has_holder()) return false;
- if (IsAccessor()) {
+ if (IsAccessorConstant()) {
return accessor_.is_identical_to(info->accessor_) &&
api_holder_.is_identical_to(info->api_holder_);
}
- if (IsConstant()) {
+ if (IsDataConstant()) {
return constant_.is_identical_to(info->constant_);
}
- DCHECK(IsField());
- if (!info->IsField()) return false;
+ DCHECK(IsData());
+ if (!info->IsData()) return false;
Representation r = access_.representation();
if (IsLoad()) {
@@ -6016,9 +5969,9 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
- if (!type_->IsClass()) return true;
- map()->LookupDescriptor(NULL, *name_, &lookup_);
- return LoadResult(map());
+ if (!map_->IsJSObjectMap()) return true;
+ lookup_.LookupDescriptor(*map_, *name_);
+ return LoadResult(map_);
}
@@ -6027,14 +5980,14 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
return false;
}
- if (IsField()) {
+ if (IsData()) {
// Construct the object field access.
int index = GetLocalFieldIndexFromMap(map);
access_ = HObjectAccess::ForField(map, index, representation(), name_);
// Load field map for heap objects.
LoadFieldMaps(map);
- } else if (IsAccessor()) {
+ } else if (IsAccessorConstant()) {
Handle<Object> accessors = GetAccessorsFromMap(map);
if (!accessors->IsAccessorPair()) return false;
Object* raw_accessor =
@@ -6046,13 +5999,12 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
CallOptimization call_optimization(accessor);
if (call_optimization.is_simple_api_call()) {
CallOptimization::HolderLookup holder_lookup;
- Handle<Map> receiver_map = this->map();
- api_holder_ = call_optimization.LookupHolderOfExpectedType(
- receiver_map, &holder_lookup);
+ api_holder_ =
+ call_optimization.LookupHolderOfExpectedType(map_, &holder_lookup);
}
}
accessor_ = accessor;
- } else if (IsConstant()) {
+ } else if (IsDataConstant()) {
constant_ = GetConstantFromMap(map);
}
@@ -6106,11 +6058,11 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
JSObject::TryMigrateInstance(holder_);
}
map = Handle<Map>(holder_->map());
- if (!CanInlinePropertyAccess(ToType(map))) {
+ if (!CanInlinePropertyAccess(map)) {
lookup_.NotFound();
return false;
}
- map->LookupDescriptor(*holder_, *name_, &lookup_);
+ lookup_.LookupDescriptor(*map, *name_);
if (IsFound()) return LoadResult(map);
}
lookup_.NotFound();
@@ -6119,10 +6071,9 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
- if (!CanInlinePropertyAccess(type_)) return false;
+ if (!CanInlinePropertyAccess(map_)) return false;
if (IsJSObjectFieldAccessor()) return IsLoad();
- if (this->map()->function_with_prototype() &&
- !this->map()->has_non_instance_prototype() &&
+ if (map_->function_with_prototype() && !map_->has_non_instance_prototype() &&
name_.is_identical_to(isolate()->factory()->prototype_string())) {
return IsLoad();
}
@@ -6131,19 +6082,18 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
if (!LookupInPrototypes()) return false;
if (IsLoad()) return true;
- if (IsAccessor()) return true;
- Handle<Map> map = this->map();
- map->LookupTransition(NULL, *name_, NONE, &lookup_);
- if (lookup_.IsTransitionToField() && map->unused_property_fields() > 0) {
+ if (IsAccessorConstant()) return true;
+ lookup_.LookupTransition(*map_, *name_, NONE);
+ if (lookup_.IsTransitionToData() && map_->unused_property_fields() > 0) {
// Construct the object field access.
int descriptor = transition()->LastAdded();
int index =
transition()->instance_descriptors()->GetFieldIndex(descriptor) -
- map->inobject_properties();
+ map_->inobject_properties();
PropertyDetails details =
transition()->instance_descriptors()->GetDetails(descriptor);
Representation representation = details.representation();
- access_ = HObjectAccess::ForField(map, index, representation, name_);
+ access_ = HObjectAccess::ForField(map_, index, representation, name_);
// Load field map for heap objects.
LoadFieldMaps(transition());
@@ -6154,17 +6104,16 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
- SmallMapList* types) {
- DCHECK(type_->Is(ToType(types->first())));
+ SmallMapList* maps) {
+ DCHECK(map_.is_identical_to(maps->first()));
if (!CanAccessMonomorphic()) return false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
- if (types->length() > kMaxLoadPolymorphism) return false;
+ if (maps->length() > kMaxLoadPolymorphism) return false;
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (GetJSObjectFieldAccess(&access)) {
- for (int i = 1; i < types->length(); ++i) {
- PropertyAccessInfo test_info(
- builder_, access_type_, ToType(types->at(i)), name_);
+ for (int i = 1; i < maps->length(); ++i) {
+ PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
if (!access.Equals(test_access)) return false;
@@ -6172,18 +6121,17 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
return true;
}
- // Currently only handle Type::Number as a polymorphic case.
+ // Currently only handle numbers as a polymorphic case.
// TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
// instruction.
- if (type_->Is(Type::Number())) return false;
+ if (IsNumberType()) return false;
// Multiple maps cannot transition to the same target map.
DCHECK(!IsLoad() || !IsTransition());
- if (IsTransition() && types->length() > 1) return false;
+ if (IsTransition() && maps->length() > 1) return false;
- for (int i = 1; i < types->length(); ++i) {
- PropertyAccessInfo test_info(
- builder_, access_type_, ToType(types->at(i)), name_);
+ for (int i = 1; i < maps->length(); ++i) {
+ PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
if (!test_info.IsCompatible(this)) return false;
}
@@ -6193,16 +6141,22 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
Handle<Map> HOptimizedGraphBuilder::PropertyAccessInfo::map() {
JSFunction* ctor = IC::GetRootConstructor(
- type_, current_info()->closure()->context()->native_context());
+ *map_, current_info()->closure()->context()->native_context());
if (ctor != NULL) return handle(ctor->initial_map());
- return type_->AsClass()->Map();
+ return map_;
+}
+
+
+static bool NeedsWrapping(Handle<Map> map, Handle<JSFunction> target) {
+ return !map->IsJSObjectMap() &&
+ is_sloppy(target->shared()->language_mode()) &&
+ !target->shared()->native();
}
-static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) {
- return type->Is(Type::NumberOrString()) &&
- target->shared()->strict_mode() == SLOPPY &&
- !target->shared()->native();
+bool HOptimizedGraphBuilder::PropertyAccessInfo::NeedsWrappingFor(
+ Handle<JSFunction> target) const {
+ return NeedsWrapping(map_, target);
}
@@ -6238,7 +6192,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
return graph()->GetConstantUndefined();
}
- if (info->IsField()) {
+ if (info->IsData()) {
if (info->IsLoad()) {
return BuildLoadNamedField(info, checked_holder);
} else {
@@ -6251,7 +6205,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
return BuildStoreNamedField(info, checked_object, value);
}
- if (info->IsAccessor()) {
+ if (info->IsAccessorConstant()) {
Push(checked_object);
int argument_count = 1;
if (!info->IsLoad()) {
@@ -6259,7 +6213,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
Push(value);
}
- if (NeedsWrappingFor(info->type(), info->accessor())) {
+ if (info->NeedsWrappingFor(info->accessor())) {
HValue* function = Add<HConstant>(info->accessor());
PushArgumentsFromEnvironment(argument_count);
return New<HCallFunction>(function, argument_count, WRAP_AND_CALL);
@@ -6275,7 +6229,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
return BuildCallConstantFunction(info->accessor(), argument_count);
}
- DCHECK(info->IsConstant());
+ DCHECK(info->IsDataConstant());
if (info->IsLoad()) {
return New<HConstant>(info->constant());
} else {
@@ -6285,13 +6239,8 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type,
- Expression* expr,
- BailoutId ast_id,
- BailoutId return_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
+ PropertyAccessType access_type, Expression* expr, BailoutId ast_id,
+ BailoutId return_id, HValue* object, HValue* value, SmallMapList* maps,
Handle<String> name) {
// Something did not match; must use a polymorphic load.
int count = 0;
@@ -6302,33 +6251,33 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
bool handle_smi = false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
int i;
- for (i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
- if (info.type()->Is(Type::String())) {
+ for (i = 0; i < maps->length() && count < kMaxLoadPolymorphism; ++i) {
+ PropertyAccessInfo info(this, access_type, maps->at(i), name);
+ if (info.IsStringType()) {
if (handled_string) continue;
handled_string = true;
}
if (info.CanAccessMonomorphic()) {
count++;
- if (info.type()->Is(Type::Number())) {
+ if (info.IsNumberType()) {
handle_smi = true;
break;
}
}
}
- if (i < types->length()) {
+ if (i < maps->length()) {
count = -1;
- types->Clear();
+ maps->Clear();
} else {
count = 0;
}
HControlInstruction* smi_check = NULL;
handled_string = false;
- for (i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
- if (info.type()->Is(Type::String())) {
+ for (i = 0; i < maps->length() && count < kMaxLoadPolymorphism; ++i) {
+ PropertyAccessInfo info(this, access_type, maps->at(i), name);
+ if (info.IsStringType()) {
if (handled_string) continue;
handled_string = true;
}
@@ -6355,11 +6304,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
HUnaryControlInstruction* compare;
HValue* dependency;
- if (info.type()->Is(Type::Number())) {
+ if (info.IsNumberType()) {
Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
compare = New<HCompareMap>(object, heap_number_map, if_true, if_false);
dependency = smi_check;
- } else if (info.type()->Is(Type::String())) {
+ } else if (info.IsStringType()) {
compare = New<HIsStringAndBranch>(object, if_true, if_false);
dependency = compare;
} else {
@@ -6368,7 +6317,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
}
FinishCurrentBlock(compare);
- if (info.type()->Is(Type::Number())) {
+ if (info.IsNumberType()) {
GotoNoSimulate(if_true, number_block);
if_true = number_block;
}
@@ -6403,8 +6352,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
- if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
+ if (count == maps->length() && FLAG_deoptimize_uncommon_cases) {
+ FinishExitWithHardDeoptimization(
+ Deoptimizer::kUnknownMapInPolymorphicAccess);
} else {
HInstruction* instr = BuildNamedGeneric(access_type, expr, object, name,
value);
@@ -6435,22 +6385,21 @@ static bool ComputeReceiverTypes(Expression* expr,
HValue* receiver,
SmallMapList** t,
Zone* zone) {
- SmallMapList* types = expr->GetReceiverTypes();
- *t = types;
+ SmallMapList* maps = expr->GetReceiverTypes();
+ *t = maps;
bool monomorphic = expr->IsMonomorphic();
- if (types != NULL && receiver->HasMonomorphicJSObjectType()) {
+ if (maps != NULL && receiver->HasMonomorphicJSObjectType()) {
Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
- types->FilterForPossibleTransitions(root_map);
- monomorphic = types->length() == 1;
+ maps->FilterForPossibleTransitions(root_map);
+ monomorphic = maps->length() == 1;
}
- return monomorphic &&
- CanInlinePropertyAccess(IC::MapToType<Type>(types->first(), zone));
+ return monomorphic && CanInlinePropertyAccess(maps->first());
}
-static bool AreStringTypes(SmallMapList* types) {
- for (int i = 0; i < types->length(); i++) {
- if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+static bool AreStringTypes(SmallMapList* maps) {
+ for (int i = 0; i < maps->length(); i++) {
+ if (maps->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
}
return true;
}
@@ -6551,7 +6500,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (value->IsConstant()) {
HConstant* c_value = HConstant::cast(value);
if (!constant.is_identical_to(c_value->handle(isolate()))) {
- Add<HDeoptimize>("Constant global variable assignment",
+ Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
Deoptimizer::EAGER);
}
} else {
@@ -6564,7 +6513,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
builder.Then();
builder.Else();
- Add<HDeoptimize>("Constant global variable assignment",
+ Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
Deoptimizer::EAGER);
builder.End();
}
@@ -6578,9 +6527,8 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* global_object = Add<HLoadNamedField>(
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HStoreNamedGeneric* instr =
- Add<HStoreNamedGeneric>(global_object, var->name(),
- value, function_strict_mode());
+ HStoreNamedGeneric* instr = Add<HStoreNamedGeneric>(
+ global_object, var->name(), value, function_language_mode());
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6885,8 +6833,9 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HValue* value,
bool is_uninitialized) {
if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for generic named access",
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ Deoptimizer::kInsufficientTypeFeedbackForGenericNamedAccess,
+ Deoptimizer::SOFT);
}
if (access_type == LOAD) {
HLoadNamedGeneric* result = New<HLoadNamedGeneric>(object, name);
@@ -6900,7 +6849,8 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
}
return result;
} else {
- return New<HStoreNamedGeneric>(object, name, value, function_strict_mode());
+ return New<HStoreNamedGeneric>(object, name, value,
+ function_language_mode());
}
}
@@ -6924,7 +6874,8 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
}
return result;
} else {
- return New<HStoreKeyedGeneric>(object, key, value, function_strict_mode());
+ return New<HStoreKeyedGeneric>(object, key, value,
+ function_language_mode());
}
}
@@ -7183,7 +7134,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
DCHECK(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
+ FinishExitWithHardDeoptimization(
+ Deoptimizer::kUnknownMapInPolymorphicElementAccess);
set_current_block(join);
return access_type == STORE ? val : Pop();
}
@@ -7221,8 +7173,8 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
DCHECK(!expr->IsPropertyName());
HInstruction* instr = NULL;
- SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
+ SmallMapList* maps;
+ bool monomorphic = ComputeReceiverTypes(expr, obj, &maps, zone());
bool force_generic = false;
if (expr->GetKeyType() == PROPERTY) {
@@ -7232,13 +7184,13 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
force_generic = true;
monomorphic = false;
} else if (access_type == STORE &&
- (monomorphic || (types != NULL && !types->is_empty()))) {
+ (monomorphic || (maps != NULL && !maps->is_empty()))) {
// Stores can't be mono/polymorphic if their prototype chain has dictionary
// elements. However a receiver map that has dictionary elements itself
// should be left to normal mono/poly behavior (the other maps may benefit
// from highly optimized stores).
- for (int i = 0; i < types->length(); i++) {
- Handle<Map> current_map = types->at(i);
+ for (int i = 0; i < maps->length(); i++) {
+ Handle<Map> current_map = maps->at(i);
if (current_map->DictionaryElementsInPrototypeChainOnly()) {
force_generic = true;
monomorphic = false;
@@ -7246,13 +7198,13 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
}
}
} else if (access_type == LOAD && !monomorphic &&
- (types != NULL && !types->is_empty())) {
+ (maps != NULL && !maps->is_empty())) {
// Polymorphic loads have to go generic if any of the maps are strings.
// If some, but not all of the maps are strings, we should go generic
// because polymorphic access wants to key on ElementsKind and isn't
// compatible with strings.
- for (int i = 0; i < types->length(); i++) {
- Handle<Map> current_map = types->at(i);
+ for (int i = 0; i < maps->length(); i++) {
+ Handle<Map> current_map = maps->at(i);
if (current_map->IsStringMap()) {
force_generic = true;
break;
@@ -7261,7 +7213,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
}
if (monomorphic) {
- Handle<Map> map = types->first();
+ Handle<Map> map = maps->first();
if (!CanInlineElementAccess(map)) {
instr = AddInstruction(BuildKeyedGeneric(access_type, expr, obj, key,
val));
@@ -7270,20 +7222,20 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, access_type, expr->GetStoreMode());
}
- } else if (!force_generic && (types != NULL && !types->is_empty())) {
- return HandlePolymorphicElementAccess(
- expr, obj, key, val, types, access_type,
- expr->GetStoreMode(), has_side_effects);
+ } else if (!force_generic && (maps != NULL && !maps->is_empty())) {
+ return HandlePolymorphicElementAccess(expr, obj, key, val, maps,
+ access_type, expr->GetStoreMode(),
+ has_side_effects);
} else {
if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
- Add<HDeoptimize>("Insufficient type feedback for keyed store",
+ Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedStore,
Deoptimizer::SOFT);
}
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
- Add<HDeoptimize>("Insufficient type feedback for keyed load",
+ Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedLoad,
Deoptimizer::SOFT);
}
}
@@ -7382,27 +7334,27 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedAccess(
Handle<String> name,
HValue* value,
bool is_uninitialized) {
- SmallMapList* types;
- ComputeReceiverTypes(expr, object, &types, zone());
- DCHECK(types != NULL);
-
- if (types->length() > 0) {
- PropertyAccessInfo info(this, access, ToType(types->first()), name);
- if (!info.CanAccessAsMonomorphic(types)) {
- HandlePolymorphicNamedFieldAccess(
- access, expr, ast_id, return_id, object, value, types, name);
+ SmallMapList* maps;
+ ComputeReceiverTypes(expr, object, &maps, zone());
+ DCHECK(maps != NULL);
+
+ if (maps->length() > 0) {
+ PropertyAccessInfo info(this, access, maps->first(), name);
+ if (!info.CanAccessAsMonomorphic(maps)) {
+ HandlePolymorphicNamedFieldAccess(access, expr, ast_id, return_id, object,
+ value, maps, name);
return NULL;
}
HValue* checked_object;
// Type::Number() is only supported by polymorphic load/call handling.
- DCHECK(!info.type()->Is(Type::Number()));
+ DCHECK(!info.IsNumberType());
BuildCheckHeapObject(object);
- if (AreStringTypes(types)) {
+ if (AreStringTypes(maps)) {
checked_object =
Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
} else {
- checked_object = Add<HCheckMaps>(object, types);
+ checked_object = Add<HCheckMaps>(object, maps);
}
return BuildMonomorphicAccess(
&info, object, checked_object, value, ast_id, return_id);
@@ -7517,8 +7469,7 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(
HValue* fun, int argument_count, bool pass_argument_count) {
- return New<HCallJSFunction>(
- fun, argument_count, pass_argument_count);
+ return New<HCallJSFunction>(fun, argument_count, pass_argument_count);
}
@@ -7545,7 +7496,8 @@ HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
HValue* target = Add<HConstant>(jsfun);
// For constant functions, we try to avoid calling the
// argument adaptor and instead call the function directly
- int formal_parameter_count = jsfun->shared()->formal_parameter_count();
+ int formal_parameter_count =
+ jsfun->shared()->internal_formal_parameter_count();
bool dont_adapt_arguments =
(formal_parameter_count ==
SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -7592,11 +7544,10 @@ inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
}
-void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
- Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
+ HValue* receiver,
+ SmallMapList* maps,
+ Handle<String> name) {
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
FunctionSorter order[kMaxCallPolymorphism];
@@ -7605,17 +7556,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
int ordered_functions = 0;
int i;
- for (i = 0; i < types->length() && ordered_functions < kMaxCallPolymorphism;
+ for (i = 0; i < maps->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
- PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
- if (info.CanAccessMonomorphic() && info.IsConstant() &&
+ PropertyAccessInfo info(this, LOAD, maps->at(i), name);
+ if (info.CanAccessMonomorphic() && info.IsDataConstant() &&
info.constant()->IsJSFunction()) {
- if (info.type()->Is(Type::String())) {
+ if (info.IsStringType()) {
if (handled_string) continue;
handled_string = true;
}
Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
- if (info.type()->Is(Type::Number())) {
+ if (info.IsNumberType()) {
handle_smi = true;
}
expr->set_target(target);
@@ -7626,8 +7577,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
std::sort(order, order + ordered_functions);
- if (i < types->length()) {
- types->Clear();
+ if (i < maps->length()) {
+ maps->Clear();
ordered_functions = -1;
}
@@ -7638,8 +7589,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
for (int fn = 0; fn < ordered_functions; ++fn) {
int i = order[fn].index();
- PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
- if (info.type()->Is(Type::String())) {
+ PropertyAccessInfo info(this, LOAD, maps->at(i), name);
+ if (info.IsStringType()) {
if (handled_string) continue;
handled_string = true;
}
@@ -7669,17 +7620,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
HUnaryControlInstruction* compare;
Handle<Map> map = info.map();
- if (info.type()->Is(Type::Number())) {
+ if (info.IsNumberType()) {
Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
- } else if (info.type()->Is(Type::String())) {
+ } else if (info.IsStringType()) {
compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
} else {
compare = New<HCompareMap>(receiver, map, if_true, if_false);
}
FinishCurrentBlock(compare);
- if (info.type()->Is(Type::Number())) {
+ if (info.IsNumberType()) {
GotoNoSimulate(if_true, number_block);
if_true = number_block;
}
@@ -7692,7 +7643,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
environment()->SetExpressionStackAt(0, function);
Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- bool needs_wrapping = NeedsWrappingFor(info.type(), target);
+ bool needs_wrapping = info.NeedsWrappingFor(target);
bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
if (FLAG_trace_inlining && try_inline) {
Handle<JSFunction> caller = current_info()->closure();
@@ -7728,8 +7679,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
- if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
+ if (ordered_functions == maps->length() && FLAG_deoptimize_uncommon_cases) {
+ FinishExitWithHardDeoptimization(Deoptimizer::kUnknownMapInPolymorphicCall);
} else {
Property* prop = expr->expression()->AsProperty();
HInstruction* function = BuildNamedGeneric(
@@ -7840,10 +7791,9 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
+ BailoutId ast_id, BailoutId return_id,
InliningKind inlining_kind,
- HSourcePosition position) {
+ SourcePosition position) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -7879,6 +7829,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
// We don't want to add more than a certain number of nodes from inlining.
+ // Always inline small methods (<= 10 nodes).
if (inlined_count_ > Min(FLAG_max_inlined_nodes_cumulative,
kUnlimitedMaxInlinedNodesCumulative)) {
TraceInline(target, caller, "cumulative AST node limit reached");
@@ -7954,7 +7905,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
DCHECK(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
- int function_id = graph()->TraceInlinedFunction(target_shared, position);
+ int function_id = top_info()->TraceInlinedFunction(target_shared, position);
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
@@ -8202,7 +8153,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
HValue* right = Pop();
HValue* left = Pop();
Drop(2); // Receiver and function.
- HInstruction* op = HMul::NewImul(zone(), context(), left, right);
+ HInstruction* op =
+ HMul::NewImul(isolate(), zone(), context(), left, right);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
@@ -8215,6 +8167,30 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
}
+// static
+bool HOptimizedGraphBuilder::IsReadOnlyLengthDescriptor(
+ Handle<Map> jsarray_map) {
+ DCHECK(!jsarray_map->is_dictionary_map());
+ LookupResult lookup;
+ Isolate* isolate = jsarray_map->GetIsolate();
+ Handle<Name> length_string = isolate->factory()->length_string();
+ lookup.LookupDescriptor(*jsarray_map, *length_string);
+ return lookup.IsReadOnly();
+}
+
+
+// static
+bool HOptimizedGraphBuilder::CanInlineArrayResizeOperation(
+ Handle<Map> receiver_map) {
+ return !receiver_map.is_null() &&
+ receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(receiver_map->elements_kind()) &&
+ !receiver_map->is_dictionary_map() &&
+ !IsReadOnlyLengthDescriptor(receiver_map) &&
+ !receiver_map->is_observed() && receiver_map->is_extensible();
+}
+
+
bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Call* expr, Handle<JSFunction> function, Handle<Map> receiver_map,
int args_count_no_receiver) {
@@ -8327,19 +8303,15 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* right = Pop();
HValue* left = Pop();
Drop(2); // Receiver and function.
- HInstruction* result = HMul::NewImul(zone(), context(), left, right);
+ HInstruction* result =
+ HMul::NewImul(isolate(), zone(), context(), left, right);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
case kArrayPop: {
- if (receiver_map.is_null()) return false;
- if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (!CanInlineArrayResizeOperation(receiver_map)) return false;
ElementsKind elements_kind = receiver_map->elements_kind();
- if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
- if (!IsFastElementsKind(elements_kind)) return false;
- if (receiver_map->is_observed()) return false;
- if (!receiver_map->is_extensible()) return false;
Drop(args_count_no_receiver);
HValue* result;
@@ -8372,11 +8344,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
result = AddElementAccess(elements, reduced_length, NULL,
bounds_check, elements_kind, LOAD);
- Factory* factory = isolate()->factory();
- double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
- ? Add<HConstant>(factory->the_hole_value())
- : Add<HConstant>(nan_double);
+ ? graph()->GetConstantHole()
+ : Add<HConstant>(HConstant::kHoleNaN);
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
}
@@ -8398,13 +8368,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
return true;
}
case kArrayPush: {
- if (receiver_map.is_null()) return false;
- if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (!CanInlineArrayResizeOperation(receiver_map)) return false;
ElementsKind elements_kind = receiver_map->elements_kind();
- if (!IsFastElementsKind(elements_kind)) return false;
- if (receiver_map->is_observed()) return false;
- if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
- if (!receiver_map->is_extensible()) return false;
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
@@ -8451,13 +8416,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
return true;
}
case kArrayShift: {
- if (receiver_map.is_null()) return false;
- if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (!CanInlineArrayResizeOperation(receiver_map)) return false;
ElementsKind kind = receiver_map->elements_kind();
- if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
- if (!IsFastElementsKind(kind)) return false;
- if (receiver_map->is_observed()) return false;
- if (!receiver_map->is_extensible()) return false;
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
@@ -8538,8 +8498,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
// Put a hole at the end.
HValue* hole = IsFastSmiOrObjectElementsKind(kind)
- ? Add<HConstant>(isolate()->factory()->the_hole_value())
- : Add<HConstant>(FixedDoubleArray::hole_nan_as_double());
+ ? graph()->GetConstantHole()
+ : Add<HConstant>(HConstant::kHoleNaN);
if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS;
Add<HStoreKeyed>(
elements, new_length, hole, kind, INITIALIZING_STORE);
@@ -8695,7 +8655,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
PrintF("\n");
}
- bool drop_extra = false;
+ bool is_function = false;
bool is_store = false;
switch (call_type) {
case kCallApiFunction:
@@ -8710,13 +8670,12 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
}
// Includes receiver.
PushArgumentsFromEnvironment(argc + 1);
- // Drop function after call.
- drop_extra = true;
+ is_function = true;
break;
case kCallApiGetter:
// Receiver and prototype chain cannot have changed.
DCHECK_EQ(0, argc);
- DCHECK_EQ(NULL, receiver);
+ DCHECK_NULL(receiver);
// Receiver is on expression stack.
receiver = Pop();
Add<HPushArguments>(receiver);
@@ -8726,7 +8685,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
is_store = true;
// Receiver and prototype chain cannot have changed.
DCHECK_EQ(1, argc);
- DCHECK_EQ(NULL, receiver);
+ DCHECK_NULL(receiver);
// Receiver and value are on expression stack.
HValue* value = Pop();
receiver = Pop();
@@ -8749,7 +8708,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
}
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data_obj(api_call_info->data(), isolate());
- bool call_data_is_undefined = call_data_obj->IsUndefined();
+ bool call_data_undefined = call_data_obj->IsUndefined();
HValue* call_data = Add<HConstant>(call_data_obj);
ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
ExternalReference ref = ExternalReference(&fun,
@@ -8757,26 +8716,42 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
isolate());
HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
- HValue* op_vals[] = {
- context(),
- Add<HConstant>(function),
- call_data,
- holder,
- api_function_address
- };
-
- ApiFunctionDescriptor descriptor(isolate());
- CallApiFunctionStub stub(isolate(), is_store, call_data_is_undefined, argc);
- Handle<Code> code = stub.GetCode();
- HConstant* code_value = Add<HConstant>(code);
-
- DCHECK((sizeof(op_vals) / kPointerSize) == descriptor.GetEnvironmentLength());
-
- HInstruction* call = New<HCallWithDescriptor>(
- code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ HValue* op_vals[] = {context(), Add<HConstant>(function), call_data, holder,
+ api_function_address, nullptr};
+
+ HInstruction* call = nullptr;
+ if (!is_function) {
+ CallApiAccessorStub stub(isolate(), is_store, call_data_undefined);
+ Handle<Code> code = stub.GetCode();
+ HConstant* code_value = Add<HConstant>(code);
+ ApiAccessorDescriptor descriptor(isolate());
+ DCHECK(arraysize(op_vals) - 1 == descriptor.GetEnvironmentLength());
+ call = New<HCallWithDescriptor>(
+ code_value, argc + 1, descriptor,
+ Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ } else if (argc <= CallApiFunctionWithFixedArgsStub::kMaxFixedArgs) {
+ CallApiFunctionWithFixedArgsStub stub(isolate(), argc, call_data_undefined);
+ Handle<Code> code = stub.GetCode();
+ HConstant* code_value = Add<HConstant>(code);
+ ApiFunctionWithFixedArgsDescriptor descriptor(isolate());
+ DCHECK(arraysize(op_vals) - 1 == descriptor.GetEnvironmentLength());
+ call = New<HCallWithDescriptor>(
+ code_value, argc + 1, descriptor,
+ Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ Drop(1); // Drop function.
+ } else {
+ op_vals[arraysize(op_vals) - 1] = Add<HConstant>(argc);
+ CallApiFunctionStub stub(isolate(), call_data_undefined);
+ Handle<Code> code = stub.GetCode();
+ HConstant* code_value = Add<HConstant>(code);
+ ApiFunctionDescriptor descriptor(isolate());
+ DCHECK(arraysize(op_vals) == descriptor.GetEnvironmentLength());
+ call = New<HCallWithDescriptor>(
+ code_value, argc + 1, descriptor,
+ Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
+ Drop(1); // Drop function.
+ }
- if (drop_extra) Drop(1); // Drop function.
ast_context()->ReturnInstruction(call, ast_id);
return true;
}
@@ -8921,7 +8896,7 @@ void HOptimizedGraphBuilder::BuildFunctionCall(Call* expr) {
HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
Handle<JSFunction> target) {
SharedFunctionInfo* shared = target->shared();
- if (shared->strict_mode() == SLOPPY && !shared->native()) {
+ if (is_sloppy(shared->language_mode()) && !shared->native()) {
// Cannot embed a direct reference to the global proxy
// as is it dropped on deserialization.
CHECK(!isolate()->serializer_enabled());
@@ -9153,14 +9128,14 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* receiver = Top();
- SmallMapList* types;
- ComputeReceiverTypes(expr, receiver, &types, zone());
+ SmallMapList* maps;
+ ComputeReceiverTypes(expr, receiver, &maps, zone());
- if (prop->key()->IsPropertyName() && types->length() > 0) {
+ if (prop->key()->IsPropertyName() && maps->length() > 0) {
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- PropertyAccessInfo info(this, LOAD, ToType(types->first()), name);
- if (!info.CanAccessAsMonomorphic(types)) {
- HandlePolymorphicCallNamed(expr, receiver, types, name);
+ PropertyAccessInfo info(this, LOAD, maps->first(), name);
+ if (!info.CanAccessAsMonomorphic(maps)) {
+ HandlePolymorphicCallNamed(expr, receiver, maps, name);
return;
}
}
@@ -9176,8 +9151,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
-
-
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
// Push the function under the receiver.
@@ -9191,7 +9164,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (TryIndirectCall(expr)) return;
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- Handle<Map> map = types->length() == 1 ? types->first() : Handle<Map>();
+ Handle<Map> map = maps->length() == 1 ? maps->first() : Handle<Map>();
if (TryInlineBuiltinMethodCall(expr, known_function, map,
expr->arguments()->length())) {
if (FLAG_trace_inlining) {
@@ -9201,10 +9174,10 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
return;
}
- if (TryInlineApiMethodCall(expr, receiver, types)) return;
+ if (TryInlineApiMethodCall(expr, receiver, maps)) return;
// Wrap the receiver if necessary.
- if (NeedsWrappingFor(ToType(types->first()), known_function)) {
+ if (NeedsWrapping(maps->first(), known_function)) {
// Since HWrapReceiver currently cannot actually wrap numbers and
// strings, use the regular CallFunctionStub for method calls to wrap
// the receiver.
@@ -9223,8 +9196,9 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (CanBeFunctionApplyArguments(expr) && expr->is_uninitialized()) {
// We have to use EAGER deoptimization here because Deoptimizer::SOFT
// gets ignored by the always-opt flag, which leads to incorrect code.
- Add<HDeoptimize>("Insufficient type feedback for call with arguments",
- Deoptimizer::EAGER);
+ Add<HDeoptimize>(
+ Deoptimizer::kInsufficientTypeFeedbackForCallWithArguments,
+ Deoptimizer::EAGER);
arguments_flag = ARGUMENTS_FAKED;
}
@@ -9324,7 +9298,20 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitExpressions(expr->arguments()));
PushArgumentsFromEnvironment(argument_count);
- call = New<HCallFunction>(function, argument_count);
+ HCallFunction* call_function =
+ New<HCallFunction>(function, argument_count);
+ call = call_function;
+ if (expr->is_uninitialized() &&
+ expr->IsUsingCallFeedbackICSlot(isolate())) {
+ // We've never seen this call before, so let's have Crankshaft learn
+ // through the type vector.
+ Handle<SharedFunctionInfo> current_shared =
+ function_state()->compilation_info()->shared_info();
+ Handle<TypeFeedbackVector> vector =
+ handle(current_shared->feedback_vector(), isolate());
+ FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
+ call_function->SetVectorAndSlot(vector, slot);
+ }
}
}
@@ -9996,7 +9983,7 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
HValue* key = Pop();
HValue* obj = Pop();
HValue* function = AddLoadJSBuiltin(Builtins::DELETE);
- Add<HPushArguments>(obj, key, Add<HConstant>(function_strict_mode()));
+ Add<HPushArguments>(obj, key, Add<HConstant>(function_language_mode()));
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
HInstruction* instr = New<HInvokeFunction>(function, 3);
@@ -10263,7 +10250,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
int32_t i = c_index->NumberValueAsInteger32();
Handle<String> s = c_string->StringValue();
if (i < 0 || i >= s->length()) {
- return New<HConstant>(base::OS::nan_value());
+ return New<HConstant>(std::numeric_limits<double>::quiet_NaN());
}
return New<HConstant>(s->Get(i));
}
@@ -10349,7 +10336,8 @@ HValue* HGraphBuilder::EnforceNumberType(HValue* number,
HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
- Maybe<HConstant*> number = constant->CopyToTruncatedNumber(zone());
+ Maybe<HConstant*> number =
+ constant->CopyToTruncatedNumber(isolate(), zone());
if (number.has_value) {
*expected = Type::Number(zone());
return AddInstruction(number.value);
@@ -10441,8 +10429,9 @@ HValue* HGraphBuilder::BuildBinaryOperation(
right_type->Maybe(Type::Receiver()));
if (!left_type->IsInhabited()) {
- Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ Deoptimizer::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
+ Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous
// defaults.
left_type = Type::Any(zone());
@@ -10452,8 +10441,9 @@ HValue* HGraphBuilder::BuildBinaryOperation(
}
if (!right_type->IsInhabited()) {
- Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ Deoptimizer::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
+ Deoptimizer::SOFT);
right_type = Type::Any(zone());
} else {
if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
@@ -10573,7 +10563,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
IfBuilder if_same(this);
if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
if_same.Then();
- if_same.ElseDeopt("Unexpected RHS of binary operation");
+ if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
@@ -10934,22 +10924,16 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
- Token::Value op,
- HValue* left,
- HValue* right,
- Type* left_type,
- Type* right_type,
- Type* combined_type,
- HSourcePosition left_position,
- HSourcePosition right_position,
- PushBeforeSimulateBehavior push_sim_result,
+ Token::Value op, HValue* left, HValue* right, Type* left_type,
+ Type* right_type, Type* combined_type, SourcePosition left_position,
+ SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (!combined_type->IsInhabited()) {
- Add<HDeoptimize>("Insufficient type feedback for combined type "
- "of binary operation",
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ Deoptimizer::kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
+ Deoptimizer::SOFT);
combined_type = left_type = right_type = Type::Any(zone());
}
@@ -10965,7 +10949,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HConstant::cast(left)->HasNumberValue()) ||
(right->IsConstant() &&
HConstant::cast(right)->HasNumberValue())) {
- Add<HDeoptimize>("Type mismatch between feedback and constant",
+ Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
@@ -11003,7 +10987,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
!HConstant::cast(left)->HasInternalizedStringValue()) ||
(right->IsConstant() &&
!HConstant::cast(right)->HasInternalizedStringValue())) {
- Add<HDeoptimize>("Type mismatch between feedback and constant",
+ Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
@@ -11228,7 +11212,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
int copied_fields = 0;
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
+ if (details.type() != DATA) continue;
copied_fields++;
FieldIndex field_index = FieldIndex::ForDescriptor(*boilerplate_map, i);
@@ -11402,9 +11386,10 @@ void HOptimizedGraphBuilder::VisitDeclarations(
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
+ int flags =
+ DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
- DeclareGlobalsStrictMode::encode(current_info()->strict_mode());
+ DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
Add<HDeclareGlobals>(array, flags);
globals_.Rewind(0);
}
@@ -11509,11 +11494,6 @@ void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
}
-void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
- UNREACHABLE();
-}
-
-
void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
UNREACHABLE();
}
@@ -11778,7 +11758,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
- DCHECK_NE(NULL, call->arguments()->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(call->arguments()->at(1)->AsLiteral());
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* date = Pop();
@@ -12052,6 +12032,12 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateDefaultConstructorCallSuper(
+ CallRuntime* call) {
+ return Bailout(kSuperReference);
+}
+
+
// Fast call to math functions.
void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
DCHECK_EQ(2, call->arguments()->length());
@@ -13244,7 +13230,7 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
if (FLAG_hydrogen_track_positions &&
instruction->has_position() &&
instruction->position().raw() != 0) {
- const HSourcePosition pos = instruction->position();
+ const SourcePosition pos = instruction->position();
os << " pos:";
if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
os << pos.position();
@@ -13389,9 +13375,9 @@ void HStatistics::Print() {
double percent = times_[i].PercentOf(sum);
PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
- unsigned size = sizes_[i];
+ size_t size = sizes_[i];
double size_percent = static_cast<double>(size) * 100 / total_size_;
- PrintF(" %9u bytes / %4.1f %%\n", size, size_percent);
+ PrintF(" %9zu bytes / %4.1f %%\n", size, size_percent);
}
PrintF(
@@ -13407,7 +13393,7 @@ void HStatistics::Print() {
PrintF(
"----------------------------------------"
"----------------------------------------\n");
- PrintF("%33s %8.3f ms %9u bytes\n", "Total",
+ PrintF("%33s %8.3f ms %9zu bytes\n", "Total",
total.InMillisecondsF(), total_size_);
PrintF("%33s (%.1f times slower than full code gen)\n", "",
total.TimesOf(full_code_gen_));
@@ -13426,7 +13412,7 @@ void HStatistics::Print() {
void HStatistics::SaveTiming(const char* name, base::TimeDelta time,
- unsigned size) {
+ size_t size) {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (strcmp(names_[i], name) == 0) {
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index c1ed797c96..8411b6ddf9 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -88,7 +88,7 @@ class HBasicBlock FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr, HSourcePosition position);
+ void AddInstruction(HInstruction* instr, SourcePosition position);
bool Dominates(HBasicBlock* other) const;
bool EqualToOrDominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
@@ -114,8 +114,7 @@ class HBasicBlock FINAL : public ZoneObject {
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
- HSimulate* AddNewSimulate(BailoutId ast_id,
- HSourcePosition position,
+ HSimulate* AddNewSimulate(BailoutId ast_id, SourcePosition position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
AddInstruction(instr, position);
@@ -167,21 +166,18 @@ class HBasicBlock FINAL : public ZoneObject {
friend class HGraphBuilder;
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- void Finish(HControlInstruction* last, HSourcePosition position);
- void FinishExit(HControlInstruction* instruction, HSourcePosition position);
- void Goto(HBasicBlock* block,
- HSourcePosition position,
- FunctionState* state = NULL,
- bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block, HSourcePosition position) {
+ void Finish(HControlInstruction* last, SourcePosition position);
+ void FinishExit(HControlInstruction* instruction, SourcePosition position);
+ void Goto(HBasicBlock* block, SourcePosition position,
+ FunctionState* state = NULL, bool add_simulate = true);
+ void GotoNoSimulate(HBasicBlock* block, SourcePosition position) {
Goto(block, position, NULL, false);
}
// Add the inlined function exit sequence, adding an HLeaveInlined
// instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value,
- FunctionState* state,
- HSourcePosition position);
+ void AddLeaveInlined(HValue* return_value, FunctionState* state,
+ SourcePosition position);
private:
void RegisterPredecessor(HBasicBlock* pred);
@@ -461,11 +457,11 @@ class HGraph FINAL : public ZoneObject {
// identifier to each inlining and dumps function source if it was inlined
// for the first time during the current optimization.
int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- HSourcePosition position);
+ SourcePosition position);
- // Converts given HSourcePosition to the absolute offset from the start of
+ // Converts given SourcePosition to the absolute offset from the start of
// the corresponding script.
- int SourcePositionToScriptPosition(HSourcePosition position);
+ int SourcePositionToScriptPosition(SourcePosition position);
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
@@ -510,23 +506,6 @@ class HGraph FINAL : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
- class InlinedFunctionInfo {
- public:
- explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
- : shared_(shared), start_position_(shared->start_position()) {
- }
-
- Handle<SharedFunctionInfo> shared() const { return shared_; }
- int start_position() const { return start_position_; }
-
- private:
- Handle<SharedFunctionInfo> shared_;
- int start_position_;
- };
-
- ZoneList<InlinedFunctionInfo> inlined_functions_;
- ZoneList<int> inlining_id_to_function_id_;
-
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -958,7 +937,7 @@ class FunctionState FINAL {
HArgumentsElements* arguments_elements_;
int inlining_id_;
- HSourcePosition outer_source_position_;
+ SourcePosition outer_source_position_;
FunctionState* outer_;
};
@@ -1046,7 +1025,7 @@ class HGraphBuilder {
graph_(NULL),
current_block_(NULL),
scope_(info->scope()),
- position_(HSourcePosition::Unknown()),
+ position_(SourcePosition::Unknown()),
start_position_(0) {}
virtual ~HGraphBuilder() {}
@@ -1102,11 +1081,15 @@ class HGraphBuilder {
return AddLeaveInlined(current_block(), return_value, state);
}
- template<class I>
- HInstruction* NewUncasted() { return I::New(zone(), context()); }
+ template <class I>
+ HInstruction* NewUncasted() {
+ return I::New(isolate(), zone(), context());
+ }
- template<class I>
- I* New() { return I::New(zone(), context()); }
+ template <class I>
+ I* New() {
+ return I::New(isolate(), zone(), context());
+ }
template<class I>
HInstruction* AddUncasted() { return AddInstruction(NewUncasted<I>());}
@@ -1116,11 +1099,13 @@ class HGraphBuilder {
template<class I, class P1>
HInstruction* NewUncasted(P1 p1) {
- return I::New(zone(), context(), p1);
+ return I::New(isolate(), zone(), context(), p1);
}
- template<class I, class P1>
- I* New(P1 p1) { return I::New(zone(), context(), p1); }
+ template <class I, class P1>
+ I* New(P1 p1) {
+ return I::New(isolate(), zone(), context(), p1);
+ }
template<class I, class P1>
HInstruction* AddUncasted(P1 p1) {
@@ -1144,12 +1129,12 @@ class HGraphBuilder {
template<class I, class P1, class P2>
HInstruction* NewUncasted(P1 p1, P2 p2) {
- return I::New(zone(), context(), p1, p2);
+ return I::New(isolate(), zone(), context(), p1, p2);
}
template<class I, class P1, class P2>
I* New(P1 p1, P2 p2) {
- return I::New(zone(), context(), p1, p2);
+ return I::New(isolate(), zone(), context(), p1, p2);
}
template<class I, class P1, class P2>
@@ -1172,12 +1157,12 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3>
HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3) {
- return I::New(zone(), context(), p1, p2, p3);
+ return I::New(isolate(), zone(), context(), p1, p2, p3);
}
template<class I, class P1, class P2, class P3>
I* New(P1 p1, P2 p2, P3 p3) {
- return I::New(zone(), context(), p1, p2, p3);
+ return I::New(isolate(), zone(), context(), p1, p2, p3);
}
template<class I, class P1, class P2, class P3>
@@ -1192,12 +1177,12 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4>
HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::New(zone(), context(), p1, p2, p3, p4);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4);
}
template<class I, class P1, class P2, class P3, class P4>
I* New(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::New(zone(), context(), p1, p2, p3, p4);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4);
}
template<class I, class P1, class P2, class P3, class P4>
@@ -1212,12 +1197,12 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5>
HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5);
}
template<class I, class P1, class P2, class P3, class P4, class P5>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5);
}
template<class I, class P1, class P2, class P3, class P4, class P5>
@@ -1232,12 +1217,12 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5, p6);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6);
}
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5, p6);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6);
}
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
@@ -1253,13 +1238,13 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7>
HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7);
}
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7);
}
template<class I, class P1, class P2, class P3,
@@ -1278,13 +1263,13 @@ class HGraphBuilder {
class P5, class P6, class P7, class P8>
HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4,
P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
}
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7, class P8>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
}
template<class I, class P1, class P2, class P3, class P4,
@@ -1469,7 +1454,7 @@ class HGraphBuilder {
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(const char* reason);
+ void FinishExitWithHardDeoptimization(Deoptimizer::DeoptReason reason);
void AddIncrementCounter(StatsCounter* counter);
@@ -1617,12 +1602,12 @@ class HGraphBuilder {
void Else();
void End();
- void Deopt(const char* reason);
- void ThenDeopt(const char* reason) {
+ void Deopt(Deoptimizer::DeoptReason reason);
+ void ThenDeopt(Deoptimizer::DeoptReason reason) {
Then();
Deopt(reason);
}
- void ElseDeopt(const char* reason) {
+ void ElseDeopt(Deoptimizer::DeoptReason reason) {
Else();
Deopt(reason);
}
@@ -1896,18 +1881,16 @@ class HGraphBuilder {
}
// Convert the given absolute offset from the start of the script to
- // the HSourcePosition assuming that this position corresponds to the
+ // the SourcePosition assuming that this position corresponds to the
// same function as current position_.
- HSourcePosition ScriptPositionToSourcePosition(int position) {
- HSourcePosition pos = position_;
+ SourcePosition ScriptPositionToSourcePosition(int position) {
+ SourcePosition pos = position_;
pos.set_position(position - start_position_);
return pos;
}
- HSourcePosition source_position() { return position_; }
- void set_source_position(HSourcePosition position) {
- position_ = position;
- }
+ SourcePosition source_position() { return position_; }
+ void set_source_position(SourcePosition position) { position_ = position; }
template <typename ViewClass>
void BuildArrayBufferViewInitialization(HValue* obj,
@@ -1927,14 +1910,14 @@ class HGraphBuilder {
HGraph* graph_;
HBasicBlock* current_block_;
Scope* scope_;
- HSourcePosition position_;
+ SourcePosition position_;
int start_position_;
};
-template<>
+template <>
inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
- const char* reason, Deoptimizer::BailoutType type) {
+ Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return NULL;
@@ -1952,9 +1935,9 @@ inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
}
-template<>
+template <>
inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
- const char* reason, Deoptimizer::BailoutType type) {
+ Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
return Add<HDeoptimize>(reason, type);
}
@@ -2128,9 +2111,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
- void* operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
- }
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
void operator delete(void* pointer, Zone* zone) { }
void operator delete(void* pointer) { }
@@ -2183,8 +2164,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void ClearInlinedTestContext() {
function_state()->ClearInlinedTestContext();
}
- StrictMode function_strict_mode() {
- return function_state()->compilation_info()->strict_mode();
+ LanguageMode function_language_mode() {
+ return function_state()->compilation_info()->language_mode();
}
// Generators for inline runtime functions.
@@ -2301,8 +2282,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- Type* ToType(Handle<Map> map);
-
private:
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
@@ -2338,13 +2317,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Handle<JSFunction> target);
int InliningAstSize(Handle<JSFunction> target);
- bool TryInline(Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
- InliningKind inlining_kind,
- HSourcePosition position);
+ bool TryInline(Handle<JSFunction> target, int arguments_count,
+ HValue* implicit_return_value, BailoutId ast_id,
+ BailoutId return_id, InliningKind inlining_kind,
+ SourcePosition position);
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2385,6 +2361,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
int argc,
BailoutId ast_id,
ApiCallType call_type);
+ static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
+ static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
// If --trace-inlining, print a line of the inlining trace. Inlining
// succeeded if the reason string is NULL and failed if there is a
@@ -2452,19 +2430,132 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void BuildInlinedCallArray(Expression* expression, int argument_count,
Handle<AllocationSite> site);
+ class LookupResult FINAL BASE_EMBEDDED {
+ public:
+ LookupResult()
+ : lookup_type_(NOT_FOUND),
+ details_(NONE, DATA, Representation::None()) {}
+
+ void LookupDescriptor(Map* map, Name* name) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int number = descriptors->SearchWithCache(name, map);
+ if (number == DescriptorArray::kNotFound) return NotFound();
+ lookup_type_ = DESCRIPTOR_TYPE;
+ details_ = descriptors->GetDetails(number);
+ number_ = number;
+ }
+
+ void LookupTransition(Map* map, Name* name, PropertyAttributes attributes) {
+ int transition_index = map->SearchTransition(kData, name, attributes);
+ if (transition_index == TransitionArray::kNotFound) return NotFound();
+ lookup_type_ = TRANSITION_TYPE;
+ transition_ = handle(map->GetTransition(transition_index));
+ number_ = transition_->LastAdded();
+ details_ = transition_->instance_descriptors()->GetDetails(number_);
+ }
+
+ void NotFound() {
+ lookup_type_ = NOT_FOUND;
+ details_ = PropertyDetails(NONE, DATA, 0);
+ }
+
+ Representation representation() const {
+ DCHECK(IsFound());
+ return details_.representation();
+ }
+
+ // Property callbacks does not include transitions to callbacks.
+ bool IsAccessorConstant() const {
+ return !IsTransition() && details_.type() == ACCESSOR_CONSTANT;
+ }
+
+ bool IsReadOnly() const {
+ DCHECK(IsFound());
+ return details_.IsReadOnly();
+ }
+
+ bool IsData() const {
+ return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == DATA;
+ }
+
+ bool IsDataConstant() const {
+ return lookup_type_ == DESCRIPTOR_TYPE &&
+ details_.type() == DATA_CONSTANT;
+ }
+
+ bool IsConfigurable() const { return details_.IsConfigurable(); }
+ bool IsFound() const { return lookup_type_ != NOT_FOUND; }
+ bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+
+ // Is the result is a property excluding transitions and the null
+ // descriptor?
+ bool IsProperty() const { return IsFound() && !IsTransition(); }
+
+ Handle<Map> GetTransitionTarget() const {
+ DCHECK(IsTransition());
+ return transition_;
+ }
+
+ bool IsTransitionToData() const {
+ return IsTransition() && details_.type() == DATA;
+ }
+
+ int GetLocalFieldIndexFromMap(Map* map) const {
+ return GetFieldIndexFromMap(map) - map->inobject_properties();
+ }
+
+ Object* GetConstantFromMap(Map* map) const {
+ DCHECK(details_.type() == DATA_CONSTANT);
+ return GetValueFromMap(map);
+ }
+
+ Object* GetValueFromMap(Map* map) const {
+ DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return map->instance_descriptors()->GetValue(number_);
+ }
+
+ int GetFieldIndexFromMap(Map* map) const {
+ DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return map->instance_descriptors()->GetFieldIndex(number_);
+ }
+
+ HeapType* GetFieldTypeFromMap(Map* map) const {
+ DCHECK_NE(NOT_FOUND, lookup_type_);
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return map->instance_descriptors()->GetFieldType(number_);
+ }
+
+ Map* GetFieldOwnerFromMap(Map* map) const {
+ DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return map->FindFieldOwner(number_);
+ }
+
+ private:
+ // Where did we find the result;
+ enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
+
+ Handle<Map> transition_;
+ int number_;
+ PropertyDetails details_;
+ };
+
class PropertyAccessInfo {
public:
PropertyAccessInfo(HOptimizedGraphBuilder* builder,
- PropertyAccessType access_type,
- Type* type,
+ PropertyAccessType access_type, Handle<Map> map,
Handle<String> name)
- : lookup_(builder->isolate()),
- builder_(builder),
+ : builder_(builder),
access_type_(access_type),
- type_(type),
+ map_(map),
name_(name),
field_type_(HType::Tagged()),
- access_(HObjectAccess::ForMap()) { }
+ access_(HObjectAccess::ForMap()) {}
// Checkes whether this PropertyAccessInfo can be handled as a monomorphic
// load named. It additionally fills in the fields necessary to generate the
@@ -2479,26 +2570,27 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
// PropertyAccessInfo is built for types->first().
bool CanAccessAsMonomorphic(SmallMapList* types);
+ bool NeedsWrappingFor(Handle<JSFunction> target) const;
+
Handle<Map> map();
- Type* type() const { return type_; }
Handle<String> name() const { return name_; }
bool IsJSObjectFieldAccessor() {
int offset; // unused
- return Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset);
+ return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
}
bool GetJSObjectFieldAccess(HObjectAccess* access) {
int offset;
- if (Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset)) {
- if (type_->Is(Type::String())) {
+ if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
+ if (IsStringType()) {
DCHECK(String::Equals(isolate()->factory()->length_string(), name_));
*access = HObjectAccess::ForStringLength();
- } else if (type_->Is(Type::Array())) {
+ } else if (IsArrayType()) {
DCHECK(String::Equals(isolate()->factory()->length_string(), name_));
- *access = HObjectAccess::ForArrayLength(map()->elements_kind());
+ *access = HObjectAccess::ForArrayLength(map_->elements_kind());
} else {
- *access = HObjectAccess::ForMapAndOffset(map(), offset);
+ *access = HObjectAccess::ForMapAndOffset(map_, offset);
}
return true;
}
@@ -2508,24 +2600,30 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
bool has_holder() { return !holder_.is_null(); }
bool IsLoad() const { return access_type_ == LOAD; }
+ Isolate* isolate() const { return builder_->isolate(); }
Handle<JSObject> holder() { return holder_; }
Handle<JSFunction> accessor() { return accessor_; }
Handle<Object> constant() { return constant_; }
- Handle<Map> transition() { return handle(lookup_.GetTransitionTarget()); }
+ Handle<Map> transition() { return lookup_.GetTransitionTarget(); }
SmallMapList* field_maps() { return &field_maps_; }
HType field_type() const { return field_type_; }
HObjectAccess access() { return access_; }
bool IsFound() const { return lookup_.IsFound(); }
bool IsProperty() const { return lookup_.IsProperty(); }
- bool IsField() const { return lookup_.IsField(); }
- bool IsConstant() const { return lookup_.IsConstant(); }
- bool IsAccessor() const { return lookup_.IsPropertyCallbacks(); }
+ bool IsData() const { return lookup_.IsData(); }
+ bool IsDataConstant() const { return lookup_.IsDataConstant(); }
+ bool IsAccessorConstant() const { return lookup_.IsAccessorConstant(); }
bool IsTransition() const { return lookup_.IsTransition(); }
bool IsConfigurable() const { return lookup_.IsConfigurable(); }
bool IsReadOnly() const { return lookup_.IsReadOnly(); }
+ bool IsStringType() { return map_->instance_type() < FIRST_NONSTRING_TYPE; }
+ bool IsNumberType() { return map_->instance_type() == HEAP_NUMBER_TYPE; }
+ bool IsValueWrapped() { return IsStringType() || IsNumberType(); }
+ bool IsArrayType() { return map_->instance_type() == JS_ARRAY_TYPE; }
+
private:
Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
return handle(lookup_.GetValueFromMap(*map), isolate());
@@ -2544,9 +2642,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
}
Representation representation() const { return lookup_.representation(); }
- Type* ToType(Handle<Map> map) { return builder_->ToType(map); }
Zone* zone() { return builder_->zone(); }
- Isolate* isolate() const { return lookup_.isolate(); }
CompilationInfo* top_info() { return builder_->top_info(); }
CompilationInfo* current_info() { return builder_->current_info(); }
@@ -2564,7 +2660,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
LookupResult lookup_;
HOptimizedGraphBuilder* builder_;
PropertyAccessType access_type_;
- Type* type_;
+ Handle<Map> map_;
Handle<String> name_;
Handle<JSObject> holder_;
Handle<JSFunction> accessor_;
@@ -2609,15 +2705,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
};
HControlInstruction* BuildCompareInstruction(
- Token::Value op,
- HValue* left,
- HValue* right,
- Type* left_type,
- Type* right_type,
- Type* combined_type,
- HSourcePosition left_position,
- HSourcePosition right_position,
- PushBeforeSimulateBehavior push_sim_result,
+ Token::Value op, HValue* left, HValue* right, Type* left_type,
+ Type* right_type, Type* combined_type, SourcePosition left_position,
+ SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
BailoutId bailout_id);
HInstruction* BuildStringCharCodeAt(HValue* string,
@@ -2793,7 +2883,7 @@ class HStatistics FINAL: public Malloced {
void Initialize(CompilationInfo* info);
void Print();
- void SaveTiming(const char* name, base::TimeDelta time, unsigned size);
+ void SaveTiming(const char* name, base::TimeDelta time, size_t size);
void IncrementFullCodeGen(base::TimeDelta full_code_gen) {
full_code_gen_ += full_code_gen;
@@ -2818,11 +2908,11 @@ class HStatistics FINAL: public Malloced {
private:
List<base::TimeDelta> times_;
List<const char*> names_;
- List<unsigned> sizes_;
+ List<size_t> sizes_;
base::TimeDelta create_graph_;
base::TimeDelta optimize_graph_;
base::TimeDelta generate_code_;
- unsigned total_size_;
+ size_t total_size_;
base::TimeDelta full_code_gen_;
double source_size_;
};
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 168a196449..511b1c8688 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -36,12 +36,17 @@
#include "src/ia32/assembler-ia32.h"
+#include <cstring>
+
+#if V8_TARGET_ARCH_IA32
+
+#if V8_LIBC_MSVCRT
+#include <intrin.h> // _xgetbv()
+#endif
#if V8_OS_MACOSX
#include <sys/sysctl.h>
#endif
-#if V8_TARGET_ARCH_IA32
-
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/disassembler.h"
@@ -56,22 +61,44 @@ namespace internal {
namespace {
-bool EnableAVX() {
+#if !V8_LIBC_MSVCRT
+
+V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
+ unsigned eax, edx;
+ // Check xgetbv; this uses a .byte sequence instead of the instruction
+ // directly because older assemblers do not include support for xgetbv and
+ // there is no easy way to conditionally compile based on the assembler
+ // used.
+ __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
+}
+
+#define _XCR_XFEATURE_ENABLED_MASK 0
+
+#endif // !V8_LIBC_MSVCRT
+
+
+bool OSHasAVXSupport() {
#if V8_OS_MACOSX
- // Mac OS X 10.9 has a bug where AVX transitions were indeed being caused by
- // ISRs, so we detect Mac OS X 10.9 here and disable AVX in that case.
+ // Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
+ // caused by ISRs, so we detect that here and disable AVX in that case.
char buffer[128];
size_t buffer_size = arraysize(buffer);
- int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+ int ctl_name[] = {CTL_KERN, KERN_OSRELEASE};
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
- // XX is the major kernel version component. 13.x.x (Mavericks) is
- // affected by this bug, so disable AVX there.
- if (memcmp(buffer, "13.", 3) == 0) return false;
+ // XX is the major kernel version component.
+ char* period_pos = strchr(buffer, '.');
+ DCHECK_NOT_NULL(period_pos);
+ *period_pos = '\0';
+ long kernel_version_major = strtol(buffer, nullptr, 10); // NOLINT
+ if (kernel_version_major <= 13) return false;
#endif // V8_OS_MACOSX
- return FLAG_enable_avx;
+ // Check whether OS claims to support AVX.
+ uint64_t feature_mask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+ return (feature_mask & 0x6) == 0x6;
}
} // namespace
@@ -87,16 +114,28 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
- if (cpu.has_avx() && EnableAVX()) supported_ |= 1u << AVX;
- if (cpu.has_fma3() && FLAG_enable_fma3) supported_ |= 1u << FMA3;
+ if (cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
+ OSHasAVXSupport()) {
+ supported_ |= 1u << AVX;
+ }
+ if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() &&
+ OSHasAVXSupport()) {
+ supported_ |= 1u << FMA3;
+ }
+ if (strcmp(FLAG_mcpu, "auto") == 0) {
+ if (cpu.is_atom()) supported_ |= 1u << ATOM;
+ } else if (strcmp(FLAG_mcpu, "atom") == 0) {
+ supported_ |= 1u << ATOM;
+ }
}
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() {
- printf("SSE3=%d SSE4_1=%d AVX=%d FMA3=%d\n", CpuFeatures::IsSupported(SSE3),
- CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(AVX),
- CpuFeatures::IsSupported(FMA3));
+ printf("SSE3=%d SSE4_1=%d AVX=%d FMA3=%d ATOM=%d\n",
+ CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSE4_1),
+ CpuFeatures::IsSupported(AVX), CpuFeatures::IsSupported(FMA3),
+ CpuFeatures::IsSupported(ATOM));
}
@@ -294,6 +333,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
+ reloc_info_writer.Finish();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -1301,6 +1341,13 @@ void Assembler::ret(int imm16) {
}
+void Assembler::ud2() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x0B);
+}
+
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -1339,7 +1386,10 @@ void Assembler::bind_to(Label* L, int pos) {
while (L->is_linked()) {
Displacement disp = disp_at(L);
int fixup_pos = L->pos();
- if (disp.type() == Displacement::CODE_RELATIVE) {
+ if (disp.type() == Displacement::CODE_ABSOLUTE) {
+ long_at_put(fixup_pos, reinterpret_cast<int>(buffer_ + pos));
+ internal_reference_positions_.push_back(fixup_pos);
+ } else if (disp.type() == Displacement::CODE_RELATIVE) {
// Relative to Code* heap object pointer.
long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
} else {
@@ -2579,28 +2629,6 @@ void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
}
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -2642,15 +2670,10 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ int32_t* p = reinterpret_cast<int32_t*>(buffer_ + pos);
+ *p += pc_delta;
}
DCHECK(!buffer_overflow());
@@ -2700,7 +2723,21 @@ void Assembler::emit_operand(Register reg, const Operand& adr) {
if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
RecordRelocInfo(adr.rmode_);
- pc_ += sizeof(int32_t);
+ if (adr.rmode_ == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
+ emit_label(*reinterpret_cast<Label**>(pc_));
+ } else {
+ pc_ += sizeof(int32_t);
+ }
+ }
+}
+
+
+void Assembler::emit_label(Label* label) {
+ if (label->is_bound()) {
+ internal_reference_positions_.push_back(pc_offset());
+ emit(reinterpret_cast<uint32_t>(buffer_ + label->pos()));
+ } else {
+ emit_disp(label, Displacement::CODE_ABSOLUTE);
}
}
@@ -2725,6 +2762,13 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dd(Label* label) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ emit_label(label);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
DCHECK(!RelocInfo::IsNone(rmode));
// Don't record external references unless the heap will be serialized.
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index b913f7afc8..a17b539bac 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -37,6 +37,9 @@
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
+#include <deque>
+
+#include "src/assembler.h"
#include "src/isolate.h"
#include "src/serialize.h"
@@ -357,6 +360,11 @@ class Operand BASE_EMBEDDED {
int32_t disp,
RelocInfo::Mode rmode = RelocInfo::NONE32);
+ static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
+ return Operand(index, scale, reinterpret_cast<int32_t>(table),
+ RelocInfo::INTERNAL_REFERENCE);
+ }
+
static Operand StaticVariable(const ExternalReference& ext) {
return Operand(reinterpret_cast<int32_t>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
@@ -430,11 +438,7 @@ class Operand BASE_EMBEDDED {
class Displacement BASE_EMBEDDED {
public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
+ enum Type { UNCONDITIONAL_JUMP, CODE_RELATIVE, OTHER, CODE_ABSOLUTE };
int data() const { return data_; }
Type type() const { return TypeField::decode(data_); }
@@ -804,6 +808,7 @@ class Assembler : public AssemblerBase {
void int3();
void nop();
void ret(int imm16);
+ void ud2();
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -1256,14 +1261,18 @@ class Assembler : public AssemblerBase {
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable, or provide "force = true" flag to always
- // write a comment.
- void RecordComment(const char* msg, bool force = false);
+ // Use --code-comments to enable.
+ void RecordComment(const char* msg);
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dd(Label* label);
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -1339,6 +1348,8 @@ class Assembler : public AssemblerBase {
void emit_operand(Register reg, const Operand& adr);
+ void emit_label(Label* label);
+
void emit_farith(int b1, int b2, int i);
// Emit vex prefix
@@ -1365,6 +1376,11 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class EnsureSpace;
+ // Internal reference positions, required for (potential) patching in
+ // GrowBuffer(); contains only those internal references whose labels
+ // are already bound.
+ std::deque<int> internal_reference_positions_;
+
// code generation
RelocInfoWriter reloc_info_writer;
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 5767489660..9aa4e073f7 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -100,6 +100,42 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 2));
+ __ push(edi);
+ offset = kPointerSize;
+ }
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi);
+ __ push(original_constructor);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(ebx, eax); // store result in ebx
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ if (create_memento) {
+ __ jmp(count_incremented);
+ } else {
+ __ jmp(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -107,6 +143,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- eax: number of arguments
// -- edi: constructor function
// -- ebx: allocation site or undefined
+ // -- edx: original constructor
// -----------------------------------
// Should never create mementos for api functions.
@@ -128,9 +165,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push the function to invoke on the stack.
__ push(edi);
+ __ cmp(edx, edi);
+ Label normal_new;
+ Label count_incremented;
+ Label allocated;
+ __ j(equal, &normal_new);
+
+ // Original constructor and function are different.
+ Generate_Runtime_NewObject(masm, create_memento, edx, &count_incremented,
+ &allocated);
+ __ bind(&normal_new);
+
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
+ Label rt_call;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
@@ -344,34 +392,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
__ bind(&rt_call);
- int offset = 0;
- if (create_memento) {
- // Get the cell or allocation site.
- __ mov(edi, Operand(esp, kPointerSize * 2));
- __ push(edi);
- offset = kPointerSize;
- }
-
- // Must restore esi (context) and edi (constructor) before calling runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- // edi: function (constructor)
- __ push(edi);
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ mov(ebx, eax); // store result in ebx
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
+ Generate_Runtime_NewObject(masm, create_memento, edi, &count_incremented,
+ &allocated);
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
@@ -478,6 +500,80 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments
+ // -- edi: constructor function
+ // -- ebx: allocation site or undefined
+ // -- edx: original constructor
+ // -----------------------------------
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve actual arguments count.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ SmiUntag(eax);
+
+ // Push new.target.
+ __ push(edx);
+
+ // receiver is the hole.
+ __ push(Immediate(masm->isolate()->factory()->the_hole_value()));
+
+ // Set up pointer to last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, eax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ __ inc(eax); // Pushed new.target.
+
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+ __ j(equal, &skip_step_in);
+
+ __ push(eax);
+ __ push(edi);
+ __ push(edi);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ pop(edi);
+ __ pop(eax);
+
+ __ bind(&skip_step_in);
+
+ // Invoke function.
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ mov(ebx, Operand(esp, 0));
+ }
+
+ __ pop(ecx); // Return address.
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));
+ __ push(ecx);
+ __ ret(0);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -1096,6 +1192,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Get the Array function.
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
+ __ mov(edx, edi);
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index b75ae3a531..9b7d9023bd 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -738,6 +738,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The key is in edx and the parameter count is in eax.
DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
@@ -804,6 +805,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// esp[8] : receiver displacement
// esp[12] : function
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -832,6 +835,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ebx = parameter count (tagged)
__ mov(ebx, Operand(esp, 1 * kPointerSize));
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
// TODO(rossberg): Factor out some of the bits that are shared with the other
// Generate* functions.
@@ -1071,9 +1076,15 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
+
+ if (has_new_target()) {
+ // Subtract 1 from smi-tagged arguments count.
+ __ sub(ecx, Immediate(2));
+ }
+
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
__ mov(Operand(esp, 2 * kPointerSize), edx);
// Try the new space allocation. Start out with computing the size of
@@ -1148,6 +1159,31 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : index of rest parameter
+ // esp[8] : number of parameters
+ // esp[12] : receiver displacement
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 3 * kPointerSize), edx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1706,7 +1742,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, eax);
__ test(ecx, edx);
@@ -2149,6 +2185,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(ebx);
}
+ if (IsSuperConstructorCall()) {
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 2 * kPointerSize));
+ } else {
+ // Pass original constructor to construct stub.
+ __ mov(edx, edi);
+ }
+
// Jump to the function-specific construct stub.
Register jmp_reg = ecx;
__ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -2189,12 +2232,11 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// edi - function
// edx - slot id
+ // ebx - vector
Label miss;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, ebx);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &miss);
@@ -2210,6 +2252,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ mov(ebx, ecx);
+ __ mov(edx, edi);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -2230,6 +2273,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// edi - function
// edx - slot id
+ // ebx - vector
Isolate* isolate = masm->isolate();
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
@@ -2241,13 +2285,31 @@ void CallICStub::Generate(MacroAssembler* masm) {
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, ebx);
-
// The checks. First, does edi match the recorded monomorphic target?
- __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size,
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
__ j(not_equal, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(edi, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2276,8 +2338,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&extra_checks_or_miss);
Label uninitialized, miss;
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &slow_start);
@@ -2321,15 +2381,18 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Update stats.
__ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
- // Store the function.
- __ mov(
- FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
- edi);
+ // Store the function. Use a stub since we need a frame for allocation.
+ // ebx - vector
+ // edx - slot
+ // edi - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(isolate);
+ __ push(edi);
+ __ CallStub(&create_stub);
+ __ pop(edi);
+ }
- // Update the write barrier.
- __ mov(eax, edi);
- __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
@@ -2354,29 +2417,22 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(ecx, Operand(esp, (arg_count() + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
- __ push(ecx);
- __ push(edi);
- __ push(ebx);
- __ push(edx);
+ // Push the function and feedback info.
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- }
+ // Move result to edi and exit the internal frame.
+ __ mov(edi, eax);
}
@@ -2392,6 +2448,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -3775,15 +3832,17 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
+ __ GetWeakValue(edi, cell);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, known_map_);
+ __ cmp(ecx, edi);
__ j(not_equal, &miss, Label::kNear);
- __ cmp(ebx, known_map_);
+ __ cmp(ebx, edi);
__ j(not_equal, &miss, Label::kNear);
__ sub(eax, edx);
@@ -4334,6 +4393,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, ebx);
+ CallICStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, ebx);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4555,9 +4628,10 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : argc (only if argument_count() == ANY)
+ // -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- ebx : AllocationSite or undefined
// -- edi : constructor
+ // -- edx : Original constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
@@ -4577,6 +4651,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(ebx);
}
+ Label subclassing;
+
+ __ cmp(edx, edi);
+ __ j(not_equal, &subclassing);
+
Label no_info;
// If the feedback vector is the undefined value call an array constructor
// that doesn't use AllocationSites.
@@ -4592,6 +4671,30 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ // Subclassing.
+ __ bind(&subclassing);
+ __ pop(ecx); // return address.
+ __ push(edi);
+ __ push(edx);
+
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ add(eax, Immediate(2));
+ break;
+ case NONE:
+ __ mov(eax, Immediate(2));
+ break;
+ case ONE:
+ __ mov(eax, Immediate(3));
+ break;
+ }
+
+ __ push(ecx);
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
@@ -4681,13 +4784,204 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+// Generates an Operand for saving parameters after PrepareCallApiFunction.
+static Operand ApiParameterOperand(int index) {
+ return Operand(esp, index * kPointerSize);
+}
+
+
+// Prepares stack to put arguments (aligns and so on). Reserves
+// space for return value if needed (assumes the return value is a handle).
+// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
+// etc. Saves context (esi). If space was reserved for return value then
+// stores the pointer to the reserved slot into esi.
+static void PrepareCallApiFunction(MacroAssembler* masm, int argc) {
+ __ EnterApiExitFrame(argc);
+ if (__ emit_debug_code()) {
+ __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
+ }
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Clobbers ebx, edi and
+// caller-save registers. Restores context. On return removes
+// stack_space * kPointerSize (GCed).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ Operand thunk_last_arg, int stack_space,
+ Operand* stack_space_operand,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address(isolate);
+ ExternalReference level_address =
+ ExternalReference::handle_scope_level_address(isolate);
+
+ DCHECK(edx.is(function_address));
+ // Allocate HandleScope in callee-save registers.
+ __ mov(ebx, Operand::StaticVariable(next_address));
+ __ mov(edi, Operand::StaticVariable(limit_address));
+ __ add(Operand::StaticVariable(level_address), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, eax);
+ __ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
+ __ cmpb(Operand(eax, 0), 0);
+ __ j(zero, &profiler_disabled);
+
+ // Additional parameter is the address of the actual getter function.
+ __ mov(thunk_last_arg, function_address);
+ // Call the api function.
+ __ mov(eax, Immediate(thunk_ref));
+ __ call(eax);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ // Call the api function.
+ __ call(function_address);
+ __ bind(&end_profiler_check);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, eax);
+ __ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label prologue;
+ // Load the value from ReturnValue
+ __ mov(eax, return_value_operand);
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ __ bind(&prologue);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ mov(Operand::StaticVariable(next_address), ebx);
+ __ sub(Operand::StaticVariable(level_address), Immediate(1));
+ __ Assert(above_equal, kInvalidHandleScopeLevel);
+ __ cmp(edi, Operand::StaticVariable(limit_address));
+ __ j(not_equal, &delete_allocated_handles);
+ __ bind(&leave_exit_frame);
+
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address(isolate);
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(isolate->factory()->the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception);
+ __ bind(&exception_handled);
+
+#if DEBUG
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = eax;
+ Register map = ecx;
+
+ __ JumpIfSmi(return_value, &ok, Label::kNear);
+ __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ __ CmpInstanceType(map, LAST_NAME_TYPE);
+ __ j(below_equal, &ok, Label::kNear);
+
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &ok, Label::kNear);
+
+ __ cmp(map, isolate->factory()->heap_number_map());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->undefined_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->true_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->false_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->null_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ Abort(kAPICallReturnedInvalidObject);
+
+ __ bind(&ok);
+#endif
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ mov(esi, *context_restore_operand);
+ }
+ if (stack_space_operand != nullptr) {
+ __ mov(ebx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame(!restore_context);
+ if (stack_space_operand != nullptr) {
+ DCHECK_EQ(0, stack_space);
+ __ pop(ecx);
+ __ add(esp, ebx);
+ __ jmp(ecx);
+ } else {
+ __ ret(stack_space * kPointerSize);
+ }
+
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ ExternalReference delete_extensions =
+ ExternalReference::delete_handle_scope_extensions(isolate);
+ __ bind(&delete_allocated_handles);
+ __ mov(Operand::StaticVariable(limit_address), edi);
+ __ mov(edi, eax);
+ __ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate)));
+ __ mov(eax, Immediate(delete_extensions));
+ __ call(eax);
+ __ mov(eax, edi);
+ __ jmp(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
- // -- eax : callee
+ // -- edi : callee
// -- ebx : call_data
// -- ecx : holder
// -- edx : api_function_address
// -- esi : context
+ // -- eax : number of arguments if argc is a register
// --
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -4696,16 +4990,12 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Register callee = eax;
+ Register callee = edi;
Register call_data = ebx;
Register holder = ecx;
Register api_function_address = edx;
- Register return_address = edi;
Register context = esi;
-
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
+ Register return_address = eax;
typedef FunctionCallbackArguments FCA;
@@ -4718,12 +5008,17 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- __ pop(return_address);
+ DCHECK(argc.is_immediate() || eax.is(argc.reg()));
- // context save
- __ push(context);
- // load context from callee
- __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+ if (argc.is_immediate()) {
+ __ pop(return_address);
+ // context save.
+ __ push(context);
+ } else {
+ // pop return address and save context
+ __ xchg(context, Operand(esp, 0));
+ return_address = context;
+ }
// callee
__ push(callee);
@@ -4734,9 +5029,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register scratch = call_data;
if (!call_data_undefined) {
// return value
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
// return value default
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
} else {
// return value
__ push(scratch);
@@ -4744,15 +5039,18 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ push(scratch);
}
// isolate
- __ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
// holder
__ push(holder);
__ mov(scratch, esp);
- // return address
+ // push return address
__ push(return_address);
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+
// API function gets reference to the v8::Arguments. If CPU profiler
// is enabled wrapper function will be called and we need to pass
// address of the callback as additional parameter, always allocate
@@ -4763,41 +5061,76 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+ PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
// FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), scratch);
- __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ Move(ApiParameterOperand(4), Immediate(argc));
- // FunctionCallbackInfo::is_construct_call_.
- __ Move(ApiParameterOperand(5), Immediate(0));
+ if (argc.is_immediate()) {
+ __ add(scratch,
+ Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Move(ApiParameterOperand(5), Immediate(0));
+ } else {
+ __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
+ (FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ mov(ApiParameterOperand(4), argc.reg());
+ // FunctionCallbackInfo::is_construct_call_.
+ __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
+ (FCA::kArgsLength + 1) * kPointerSize));
+ __ mov(ApiParameterOperand(5), argc.reg());
+ }
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
__ mov(ApiParameterOperand(0), scratch);
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
Operand context_restore_operand(ebp,
(2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
Operand return_value_operand(ebp, return_value_offset * kPointerSize);
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- ApiParameterOperand(1),
- argc + FCA::kArgsLength + 1,
- return_value_operand,
- &context_restore_operand);
+ int stack_space = 0;
+ Operand is_construct_call_operand = ApiParameterOperand(5);
+ Operand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = nullptr;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ ApiParameterOperand(1), stack_space,
+ stack_space_operand, return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
+ call_data_undefined);
+}
+
+
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -4824,7 +5157,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// load address of name
__ lea(scratch, Operand(esp, 1 * kPointerSize));
- __ PrepareCallApiFunction(kApiArgc);
+ PrepareCallApiFunction(masm, kApiArgc);
__ mov(ApiParameterOperand(0), scratch); // name.
__ add(scratch, Immediate(kPointerSize));
__ mov(ApiParameterOperand(1), scratch); // arguments pointer.
@@ -4832,12 +5165,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- ApiParameterOperand(2),
- kStackSpace,
- Operand(ebp, 7 * kPointerSize),
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ ApiParameterOperand(2), kStackSpace, nullptr,
+ Operand(ebp, 7 * kPointerSize), NULL);
}
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index e451fcc9e6..27e308d298 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -212,7 +212,8 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
- int parameter_count = function->shared()->formal_parameter_count() + 1;
+ int parameter_count =
+ function->shared()->internal_formal_parameter_count() + 1;
unsigned input_frame_size = input_->GetFrameSize();
unsigned alignment_state_offset =
input_frame_size - parameter_count * kPointerSize -
@@ -244,6 +245,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pushad();
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ __ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+
const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
kDoubleRegsSize;
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index bf88f69c96..576c7393cc 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1035,6 +1035,8 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
// Returns NULL if the instruction is not handled here.
static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
+ case 0x0B:
+ return "ud2";
case 0x18: return "prefetch";
case 0xA2: return "cpuid";
case 0xBE: return "movsx_b";
@@ -1215,7 +1217,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data[7] == 0) {
AppendToBuffer("nop"); // 8 byte nop.
data += 8;
- } else if (f0byte == 0xA2 || f0byte == 0x31) {
+ } else if (f0byte == 0x0B || f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
} else if (f0byte == 0x28) {
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 1ba4095715..cf181597aa 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -114,7 +114,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -143,7 +143,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -190,7 +190,7 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in edi.
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(edi);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -236,6 +236,26 @@ void FullCodeGenerator::Generate() {
}
}
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ lea(edx,
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(num_parameters)));
+ __ push(Immediate(Smi::FromInt(rest_index)));
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, eax, ebx, edx);
+ }
+
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -257,14 +277,18 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, eax, ebx, edx);
@@ -413,7 +437,11 @@ void FullCodeGenerator::EmitReturnSequence() {
int no_frame_start = masm_->pc_offset();
__ pop(ebp);
- int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
@@ -865,15 +893,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(eax, scope_->ContextChainLength(scope_->ScriptScope()));
- __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
+ __ mov(eax, ContextOperand(eax, descriptor->Index()));
__ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
// Assign it.
@@ -1184,6 +1213,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1221,7 +1251,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
@@ -1460,6 +1490,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1617,11 +1652,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ push(eax); // Save result on the stack
@@ -1662,7 +1699,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
if (property->emit_store()) {
EmitSetHomeObjectIfNeeded(value, 2);
- __ push(Immediate(Smi::FromInt(SLOPPY))); // Strict mode
+ __ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
@@ -1671,17 +1708,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- __ Drop(2);
- }
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1701,6 +1739,65 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ push(Operand(esp, 0));
@@ -1756,6 +1853,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1896,18 +1994,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
- mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -2288,7 +2382,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left,
Expression* right) {
// Do combined smi check of the operands. Left operand is on the
@@ -2302,7 +2395,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2394,37 +2487,35 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ push(Operand(esp, kPointerSize)); // constructor
} else {
__ push(Operand(esp, 0)); // prototype
}
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ push(Immediate(Smi::FromInt(DONT_ENUM)));
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ push(Immediate(Smi::FromInt(DONT_ENUM)));
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
-
- default:
- UNREACHABLE();
}
}
@@ -2436,11 +2527,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(edx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2515,7 +2604,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2581,7 +2670,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
@@ -2596,7 +2685,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
@@ -2631,8 +2720,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ push(Immediate(key->value()));
__ push(eax);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2643,9 +2732,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
__ push(eax);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2660,7 +2750,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2683,8 +2774,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(eax);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2699,8 +2788,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(eax);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(eax);
}
@@ -2847,9 +2937,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2877,7 +2966,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the language mode.
- __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
// Push the start position of the scope the calls resides in.
__ push(Immediate(Smi::FromInt(scope()->start_position())));
@@ -2887,8 +2976,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kGetPrototype, 1);
}
@@ -2944,7 +3032,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::GLOBAL_CALL) {
EmitCallWithLoadIC(expr);
-
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
VariableProxy* proxy = callee->AsVariableProxy();
@@ -3003,11 +3090,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3035,12 +3118,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3074,6 +3153,66 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ if (!ValidateSuperCall(expr)) return;
+
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(eax, new_target_var);
+ __ push(eax);
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into edi and eax.
+ __ Move(eax, Immediate(arg_count));
+ __ mov(edi, Operand(esp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ RecordJSReturnSite(expr);
+
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(ecx, this_var);
+ __ cmp(ecx, isolate()->factory()->the_hole_value());
+ Label uninitialized_this;
+ __ j(equal, &uninitialized_this);
+ __ push(Immediate(this_var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3608,7 +3747,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3945,6 +4084,56 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(eax, new_target_var);
+ __ push(eax);
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame);
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ mov(eax, Immediate(0));
+ __ jmp(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(ecx);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ sub(ecx, Immediate(1));
+ __ mov(eax, ecx);
+ __ lea(edx, Operand(edx, ecx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ Label loop;
+ __ bind(&loop);
+ __ push(Operand(edx, -1 * kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+ }
+
+ __ bind(&args_set_up);
+
+ __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+ __ mov(ebx, Immediate(isolate()->factory()->undefined_value()));
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpConstructResultStub stub(isolate());
@@ -3964,7 +4153,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4393,14 +4582,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
@@ -4621,6 +4810,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4655,8 +4845,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
- NO_OVERWRITE).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4726,7 +4916,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::NameRegister());
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 6c77ef8f81..b0e57fc2e2 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -101,7 +101,19 @@ void FastCloneShallowObjectDescriptor::Initialize(
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {esi, ebx, edx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ebx, edx, edi};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -128,6 +140,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi, edx, ebx};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
@@ -296,7 +318,28 @@ void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {
esi, // context
- eax, // callee
+ edi, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ eax, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ edi, // callee
ebx, // call_data
ecx, // holder
edx, // api_function_address
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 03a0d8a3df..d750cb87d5 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -75,7 +75,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -141,8 +140,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
// +1 for return address.
@@ -386,7 +384,7 @@ bool LCodeGen::GenerateJumpTable() {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
@@ -821,7 +819,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -863,18 +861,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -888,11 +887,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, detail, bailout_type);
+ DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}
@@ -914,6 +913,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1121,7 +1121,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@@ -1138,7 +1138,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1153,7 +1153,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, "minus zero");
+ DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1175,7 +1175,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1186,7 +1186,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, "minus zero");
+ DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@@ -1205,7 +1205,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1225,19 +1225,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, "lost precision");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1258,7 +1258,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1266,7 +1266,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1276,7 +1276,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1296,7 +1296,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1305,7 +1305,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1315,7 +1315,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1326,7 +1326,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, "lost precision");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1348,13 +1348,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
return;
}
@@ -1381,7 +1381,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1389,7 +1389,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1436,7 +1436,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1445,7 +1445,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1455,7 +1455,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1533,7 +1533,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1543,15 +1543,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, "minus zero");
+ DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -1624,7 +1624,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, "negative value");
+ DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
@@ -1641,7 +1641,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, "negative value");
+ DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1656,7 +1656,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, "negative value");
+ DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
@@ -1667,7 +1667,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
__ shl(ToRegister(left), shift_count);
}
@@ -1693,7 +1693,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
@@ -1709,38 +1709,37 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
- double v = instr->value();
- uint64_t int_val = bit_cast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ uint64_t const bits = instr->bits();
+ uint32_t const lower = static_cast<uint32_t>(bits);
+ uint32_t const upper = static_cast<uint32_t>(bits >> 32);
DCHECK(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- if (int_val == 0) {
- __ xorps(res, res);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ if (bits == 0u) {
+ __ xorps(result, result);
} else {
Register temp = ToRegister(instr->temp());
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope2(masm(), SSE4_1);
if (lower != 0) {
__ Move(temp, Immediate(lower));
- __ movd(res, Operand(temp));
+ __ movd(result, Operand(temp));
__ Move(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
+ __ pinsrd(result, Operand(temp), 1);
} else {
- __ xorps(res, res);
+ __ xorps(result, result);
__ Move(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
+ __ pinsrd(result, Operand(temp), 1);
}
} else {
__ Move(temp, Immediate(upper));
- __ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
+ __ movd(result, Operand(temp));
+ __ psllq(result, 32);
+ if (lower != 0u) {
XMMRegister xmm_scratch = double_scratch0();
__ Move(temp, Immediate(lower));
__ movd(xmm_scratch, Operand(temp));
- __ orps(res, xmm_scratch);
+ __ orps(result, xmm_scratch);
}
}
}
@@ -1777,9 +1776,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr, "not a date object");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1909,7 +1908,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
}
@@ -1983,19 +1982,43 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
- __ addsd(left, right);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vaddsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ addsd(left, right);
+ }
break;
case Token::SUB:
- __ subsd(left, right);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vsubsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ subsd(left, right);
+ }
break;
case Token::MUL:
- __ mulsd(left, right);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vmulsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ mulsd(left, right);
+ }
break;
case Token::DIV:
- __ divsd(left, right);
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a mulsd depending on the result
- __ movaps(left, left);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vdivsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ divsd(left, right);
+ // Don't delete this mov. It may improve performance on some CPUs,
+ // when there is a mulsd depending on the result
+ __ movaps(left, left);
+ }
break;
case Token::MOD: {
// Pass two doubles as arguments on the stack.
@@ -2027,8 +2050,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2133,7 +2155,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
@@ -2190,7 +2212,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, "unexpected object");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -2822,7 +2844,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
}
@@ -2871,7 +2893,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
// Store the value.
@@ -2888,7 +2910,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -2909,7 +2931,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@@ -3009,7 +3031,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3102,7 +3124,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr, "negative value");
+ DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3132,7 +3154,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -3159,10 +3181,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, "not a Smi");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
}
}
@@ -3309,9 +3331,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, "Smi");
+ DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr, "not a JavaScript object");
+ DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3337,7 +3359,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, "too many arguments");
+ DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
__ push(receiver);
__ mov(receiver, length);
@@ -3407,22 +3429,18 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- EDIState edi_state) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
- if (can_invoke_directly) {
- if (edi_state == EDI_UNINITIALIZED) {
- __ LoadHeapObject(edi, function);
- }
+ Register function_reg = edi;
+ if (can_invoke_directly) {
// Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
// Set eax to arguments count if adaption is not needed. Assumes that eax
// is available to write to at this point.
@@ -3434,7 +3452,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
}
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
@@ -3444,7 +3462,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
@@ -3566,7 +3584,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
@@ -3613,7 +3631,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, "overflow");
+ DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
__ bind(&is_positive);
}
@@ -3672,20 +3690,20 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ j(not_equal, &non_zero, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ bind(&non_zero);
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, "NaN");
+ DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3694,7 +3712,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
@@ -3704,7 +3722,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3715,7 +3733,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ bind(&done);
}
@@ -3743,7 +3761,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3758,7 +3776,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3774,7 +3792,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ Move(output_reg, Immediate(0));
__ bind(&done);
@@ -3850,7 +3868,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!ecx.is(tagged_exponent));
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3874,9 +3892,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(not_carry, &zero, Label::kNear);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movsd(input_reg, Operand::StaticVariable(nan));
+ __ pcmpeqd(input_reg, input_reg);
__ jmp(&done, Label::kNear);
__ bind(&zero);
ExternalReference ninf =
@@ -3936,9 +3952,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- EDI_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
@@ -3949,8 +3963,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(edx));
+ DCHECK(vector_register.is(ebx));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ mov(vector_register, vector);
+ __ mov(slot_register, Immediate(Smi::FromInt(index)));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4137,7 +4173,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4162,7 +4198,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds");
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -4232,8 +4268,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
@@ -4244,13 +4278,10 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value, Label::kNear); // NaN.
-
- __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
+ XMMRegister xmm_scratch = double_scratch0();
+ // Turn potential sNaN value into qNaN.
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ subsd(value, xmm_scratch);
}
__ movsd(double_store_operand, value);
@@ -4320,7 +4351,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4330,7 +4361,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, "memento found");
+ DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4355,7 +4386,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
Immediate(to_map));
// Write barrier.
- DCHECK_NE(instr->temp(), NULL);
+ DCHECK_NOT_NULL(instr->temp());
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()),
kDontSaveFPRegs);
@@ -4676,12 +4707,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, "overflow");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
@@ -4692,7 +4723,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, "not a Smi");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
} else {
__ AssertSmi(result);
}
@@ -4719,7 +4750,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
}
// Heap number to XMM conversion.
@@ -4732,20 +4763,18 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
- // Convert undefined (and hole) to NaN.
+ // Convert undefined to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movsd(result_reg, Operand::StaticVariable(nan));
+ __ pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
@@ -4795,26 +4824,27 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
+ DeoptimizeIf(not_equal, instr,
+ Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ Move(input_reg, Immediate(0));
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ Cvtsi2sd(scratch, Operand(input_reg));
__ ucomisd(xmm0, scratch);
- DeoptimizeIf(not_equal, instr, "lost precision");
- DeoptimizeIf(parity_even, instr, "NaN");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
}
}
@@ -4894,11 +4924,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, "lost precision");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, "NaN");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -4920,21 +4950,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, "lost precision");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, "NaN");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, "not a Smi");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
}
@@ -4942,7 +4972,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
}
}
@@ -4963,14 +4993,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, "wrong instance type");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
} else {
- DeoptimizeIf(below, instr, "wrong instance type");
+ DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr, "wrong instance type");
+ DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
} else {
@@ -4981,12 +5011,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
+ Deoptimizer::kWrongInstanceType);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, "wrong instance type");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -5002,7 +5033,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr, "value mismatch");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}
@@ -5017,7 +5048,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, "instance migration failed");
+ DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
}
@@ -5071,7 +5102,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
__ bind(&success);
@@ -5110,7 +5141,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
@@ -5339,7 +5370,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -5598,17 +5629,17 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr, "undefined");
+ DeoptimizeIf(equal, instr, Deoptimizer::kUndefined);
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr, "null");
+ DeoptimizeIf(equal, instr, Deoptimizer::kNull);
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr, "wrong instance type");
+ DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -5623,7 +5654,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
__ bind(&use_cache);
}
@@ -5646,7 +5677,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr, "no cache");
+ DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
}
@@ -5654,7 +5685,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 0918252327..d7dca7c587 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -126,7 +126,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
@@ -191,27 +191,22 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
- enum EDIState {
- EDI_UNINITIALIZED,
- EDI_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
+ // Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- EDIState edi_state);
+ int formal_parameter_count, int arity,
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 3be2fc41d0..1c8d075dcb 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -274,6 +274,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -765,7 +779,8 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
+ : DefineSameAsFirst(result);
}
}
@@ -1280,7 +1295,15 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(edx);
+ vector = FixedTemp(ebx);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, eax), instr);
}
@@ -2095,9 +2118,8 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
- double value = instr->DoubleValue();
- bool value_is_zero = bit_cast<uint64_t, double>(value) == 0;
- LOperand* temp = value_is_zero ? NULL : TempRegister();
+ uint64_t const bits = instr->DoubleValueAsBits();
+ LOperand* temp = bits ? TempRegister() : nullptr;
return DefineAsRegister(new(zone()) LConstantD(temp));
} else if (r.IsExternal()) {
return DefineAsRegister(new(zone()) LConstantE);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 49eba66ed1..3f591705d2 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -1344,7 +1344,7 @@ class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- double value() const { return hydrogen()->DoubleValue(); }
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
};
@@ -1949,19 +1949,25 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- explicit LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -2228,7 +2234,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2285,7 +2291,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 38259d7651..53ffa39357 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -675,7 +675,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
XMMRegister scratch2,
Label* fail,
int elements_offset) {
- Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+ Label smi_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
CheckMap(maybe_number,
@@ -683,31 +683,10 @@ void MacroAssembler::StoreNumberToDoubleElements(
fail,
DONT_DO_SMI_CHECK);
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmp(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movsd(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
- jmp(&have_double_value, Label::kNear);
+ // Double value, turn potential sNaN into qNaN.
+ Move(scratch2, 1.0);
+ mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ jmp(&done, Label::kNear);
bind(&smi_value);
// Value is a smi. Convert to a double and store.
@@ -715,10 +694,10 @@ void MacroAssembler::StoreNumberToDoubleElements(
mov(scratch1, maybe_number);
SmiUntag(scratch1);
Cvtsi2sd(scratch2, scratch1);
+ bind(&done);
movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
- bind(&done);
}
@@ -1359,7 +1338,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
j(not_zero, miss);
@@ -2084,169 +2063,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-Operand ApiParameterOperand(int index) {
- return Operand(esp, index * kPointerSize);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int argc) {
- EnterApiExitFrame(argc);
- if (emit_debug_code()) {
- mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- Operand thunk_last_arg,
- int stack_space,
- Operand return_value_operand,
- Operand* context_restore_operand) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address(isolate());
- ExternalReference level_address =
- ExternalReference::handle_scope_level_address(isolate());
-
- DCHECK(edx.is(function_address));
- // Allocate HandleScope in callee-save registers.
- mov(ebx, Operand::StaticVariable(next_address));
- mov(edi, Operand::StaticVariable(limit_address));
- add(Operand::StaticVariable(level_address), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, eax);
- mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
-
- Label profiler_disabled;
- Label end_profiler_check;
- mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
- cmpb(Operand(eax, 0), 0);
- j(zero, &profiler_disabled);
-
- // Additional parameter is the address of the actual getter function.
- mov(thunk_last_arg, function_address);
- // Call the api function.
- mov(eax, Immediate(thunk_ref));
- call(eax);
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- // Call the api function.
- call(function_address);
- bind(&end_profiler_check);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, eax);
- mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label prologue;
- // Load the value from ReturnValue
- mov(eax, return_value_operand);
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- bind(&prologue);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- mov(Operand::StaticVariable(next_address), ebx);
- sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, kInvalidHandleScopeLevel);
- cmp(edi, Operand::StaticVariable(limit_address));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
- cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(isolate()->factory()->the_hole_value()));
- j(not_equal, &promote_scheduled_exception);
- bind(&exception_handled);
-
-#if ENABLE_EXTRA_CHECKS
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = eax;
- Register map = ecx;
-
- JumpIfSmi(return_value, &ok, Label::kNear);
- mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- j(below, &ok, Label::kNear);
-
- CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- j(above_equal, &ok, Label::kNear);
-
- cmp(map, isolate()->factory()->heap_number_map());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->undefined_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->true_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->false_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->null_value());
- j(equal, &ok, Label::kNear);
-
- Abort(kAPICallReturnedInvalidObject);
-
- bind(&ok);
-#endif
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- mov(esi, *context_restore_operand);
- }
- LeaveApiExitFrame(!restore_context);
- ret(stack_space * kPointerSize);
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- ExternalReference delete_extensions =
- ExternalReference::delete_handle_scope_extensions(isolate());
- bind(&delete_allocated_handles);
- mov(Operand::StaticVariable(limit_address), edi);
- mov(edi, eax);
- mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate())));
- mov(eax, Immediate(delete_extensions));
- call(eax);
- mov(eax, edi);
- jmp(&leave_exit_frame);
-}
-
-
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
@@ -2587,10 +2403,15 @@ void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
}
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, cell);
mov(value, FieldOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
JumpIfSmi(value, miss);
}
@@ -2855,6 +2676,18 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+ : AccessorPair::kSetterOffset;
+ mov(dst, FieldOperand(dst, offset));
+}
+
+
void MacroAssembler::LoadPowerOf2(XMMRegister dst,
Register scratch,
int power) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 83f6216445..e62c7d8b4d 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -298,6 +298,8 @@ class MacroAssembler: public Assembler {
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
@@ -517,6 +519,8 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register reg) {
@@ -791,24 +795,6 @@ class MacroAssembler: public Assembler {
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
- // Prepares stack to put arguments (aligns and so on). Reserves
- // space for return value if needed (assumes the return value is a handle).
- // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
- // etc. Saves context (esi). If space was reserved for return value then
- // stores the pointer to the reserved slot into esi.
- void PrepareCallApiFunction(int argc);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers ebx, edi and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- Operand thunk_last_arg,
- int stack_space,
- Operand return_value_operand,
- Operand* context_restore_operand);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -1089,10 +1075,6 @@ inline Operand GlobalObjectOperand() {
}
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index);
-
-
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 4118db8819..a4095055ea 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -78,12 +78,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 8f6499cba9..2927a26077 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -15,7 +15,8 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerIA32();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 9905e4eb7d..bacf44dbe4 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -17,8 +17,8 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
@@ -27,18 +27,21 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ ldr(receiver,
+ __ ldr(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(r1, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -53,8 +56,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
@@ -64,18 +67,22 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Save value register, so we can restore it later.
__ push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ ldr(receiver,
+ __ ldr(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver, value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(r1, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -192,7 +199,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
+ Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
__ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
@@ -204,16 +212,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(interceptor));
- __ push(scratch);
__ push(receiver);
__ push(holder);
}
@@ -229,37 +231,48 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch_in));
DCHECK(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ push(arg);
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch_in.is(store_parameter));
+ __ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
Register callee = r0;
- Register call_data = r4;
+ Register data = r4;
Register holder = r2;
Register api_function_address = r1;
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ Move(holder, api_holder);
+ __ ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ ldr(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -267,23 +280,19 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ Move(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ Move(call_data, api_call_info);
- __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(call_data, call_data_obj);
+ __ ldr(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ ldr(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
@@ -294,7 +303,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
@@ -377,13 +386,17 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
- __ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
- __ CompareMap(scratch1(), it.Current(), &do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
it.Advance();
if (it.Done()) {
__ b(ne, miss_label);
@@ -400,7 +413,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -412,8 +425,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
@@ -549,12 +562,16 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
DCHECK(!scratch3().is(reg));
DCHECK(!scratch4().is(reg));
__ push(receiver());
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3(), callback);
- __ ldr(scratch3(),
- FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ // Push data from ExecutableAccessorInfo.
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ Move(scratch3(), data);
} else {
- __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch3(), cell);
}
__ push(scratch3());
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
@@ -660,7 +677,15 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(receiver()); // receiver
__ push(holder_reg);
- __ mov(ip, Operand(callback)); // callback info
+
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ mov(ip, Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ mov(ip, Operand(cell));
+ }
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 70a5d84e93..093a7c1bb2 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -158,12 +158,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* not_fast_array,
- Label* out_of_range) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -172,8 +170,6 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
- // elements - holds the elements of the receiver on exit.
- //
// result - holds the result on exit if the load succeeded.
// Allowed to be the the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
@@ -181,34 +177,59 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// Scratch registers:
//
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
+ // elements - holds the elements of the receiver and its prototypes.
+ //
+ // scratch1 - used to hold elements length, bit fields, base addresses.
//
- // scratch2 - used to hold the loaded value.
+ // scratch2 - used to hold maps, prototypes, and the loaded value.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
+ __ AssertFastElements(elements);
+
// Check that the key (index) is within bounds.
__ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch1));
- __ b(hs, out_of_range);
+ __ b(lo, &in_bounds);
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ cmp(key, Operand(0));
+ __ b(lt, slow); // Negative keys can't take the fast OOB path.
+ __ bind(&check_prototypes);
+ __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+ __ b(eq, &return_undefined);
+ __ ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+ __ ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch2: map of current prototype
+ __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
+ __ b(lo, slow);
+ __ ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+ __ tst(scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ b(ne, slow);
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, slow);
+ __ jmp(&check_next_prototype);
+
+ __ bind(&return_undefined);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, out_of_range);
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ b(eq, &check_prototypes);
__ mov(result, scratch2);
+ __ bind(&done);
}
@@ -444,7 +465,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -468,7 +489,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r0, r3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
__ Ret();
@@ -498,94 +519,35 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
Map::kHasNamedInterceptor, &slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r4, ip);
__ b(eq, &probe_dictionary);
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
- __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
- __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(r3, r3, Operand(mask));
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ mov(r4, Operand(cache_keys));
- __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and move r4 to next entry.
- __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
- __ cmp(r0, r5);
- __ b(ne, &try_next_entry);
- __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
- __ cmp(key, r5);
- __ b(eq, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- // Last entry: Load map and move r4 to name.
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
- __ cmp(r0, r5);
- __ b(ne, &slow);
- __ ldr(r5, MemOperand(r4));
- __ cmp(key, r5);
- __ b(ne, &slow);
- // Get field offset.
- // r0 : receiver's map
- // r3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ mov(r4, Operand(cache_field_offsets));
- if (i != 0) {
- __ add(r3, r3, Operand(i));
- }
- __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
- __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
- __ sub(r5, r5, r6, SetCC);
- __ b(ge, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ mov(slot, Operand(Smi::FromInt(int_slot)));
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
- __ add(r6, r6, r5); // Index from start of object.
- __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- r4, r3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- r4, r3);
- __ Ret();
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::KEYED_LOAD_IC, flags, false, receiver, key, r4, r5, r6, r9);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
@@ -760,7 +722,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -815,7 +777,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// r0: value.
// r1: key.
// r2: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
index b44702ba3d..c5c0b7057e 100644
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -15,12 +15,12 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister());
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ mov(r0, Operand(Smi::FromInt(language_mode)));
__ Push(r0);
// Do tail-call to runtime routine.
@@ -32,7 +32,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -59,7 +59,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
}
Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
@@ -67,17 +67,16 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 1c28bf51a2..12a2401294 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -111,7 +111,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- __ Mov(scratch, Operand(cell));
+ Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
__ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
}
@@ -121,17 +122,11 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
-
- __ Push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ Mov(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+
+ __ Push(name, receiver, holder);
}
@@ -146,19 +141,21 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, int argc, Register* values) {
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!AreAliased(accessor_holder, scratch));
DCHECK(!AreAliased(receiver, scratch));
MacroAssembler::PushPopQueue queue(masm);
queue.Queue(receiver);
// Write the arguments to the stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!AreAliased(receiver, scratch, arg));
- queue.Queue(arg);
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch.is(store_parameter));
+ queue.Queue(store_parameter);
}
queue.PushQueued();
@@ -166,20 +163,30 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
// Abi for CallApiFunctionStub.
Register callee = x0;
- Register call_data = x4;
+ Register data = x4;
Register holder = x2;
Register api_function_address = x1;
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Mov(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ LoadObject(holder, api_holder);
+ __ Ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ Ldr(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ Ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -187,23 +194,19 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadObject(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ LoadObject(call_data, api_call_info);
- __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ LoadObject(call_data, call_data_obj);
+ __ Ldr(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ldr(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ Ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
@@ -214,37 +217,40 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ Mov(api_function_address, ref);
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
Label miss;
-
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ Push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!AreAliased(holder, scratch));
+ DCHECK(!AreAliased(receiver, scratch));
+ DCHECK(!AreAliased(value(), scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ Ldr(receiver,
+ __ Ldr(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver, value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(x1, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -262,23 +268,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!AreAliased(holder, scratch));
+ DCHECK(!AreAliased(receiver, scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ Ldr(receiver,
+ __ Ldr(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(x1, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -427,13 +436,17 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
- __ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ Ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
- __ CompareMap(scratch1(), it.Current());
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
it.Advance();
if (it.Done()) {
__ B(ne, miss_label);
@@ -450,7 +463,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// object_reg and holder_reg registers can alias.
DCHECK(!AreAliased(object_reg, scratch1, scratch2));
@@ -461,8 +474,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
@@ -606,12 +619,15 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ Push(receiver());
- if (heap()->InNewSpace(callback->data())) {
- __ Mov(scratch3(), Operand(callback));
- __ Ldr(scratch3(),
- FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ Mov(scratch3(), Operand(data));
} else {
- __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch3(), cell);
}
__ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
__ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
@@ -730,7 +746,14 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
// receiver() and holder_reg can alias.
DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
- __ Mov(scratch1(), Operand(callback));
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ Mov(scratch1(), Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ Mov(scratch1(), Operand(cell));
+ }
__ Mov(scratch2(), Operand(name));
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index a01015c186..bae6ac33cf 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -151,63 +151,77 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
//
-// receiver - holds the receiver on entry.
-// Unchanged unless 'result' is the same register.
+// receiver - holds the receiver on entry.
+// Unchanged unless 'result' is the same register.
//
-// key - holds the smi key on entry.
-// Unchanged unless 'result' is the same register.
+// key - holds the smi key on entry.
+// Unchanged unless 'result' is the same register.
//
-// elements - holds the elements of the receiver on exit.
+// elements - holds the elements of the receiver and its prototypes. Clobbered.
//
-// elements_map - holds the elements map on exit if the not_fast_array branch is
-// taken. Otherwise, this is used as a scratch register.
-//
-// result - holds the result on exit if the load succeeded.
-// Allowed to be the the same as 'receiver' or 'key'.
-// Unchanged on bailout so 'receiver' and 'key' can be safely
-// used by further computation.
+// result - holds the result on exit if the load succeeded.
+// Allowed to be the the same as 'receiver' or 'key'.
+// Unchanged on bailout so 'receiver' and 'key' can be safely
+// used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
- Register elements_map, Register scratch2,
- Register result, Label* not_fast_array,
- Label* slow) {
- DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
+ Register scratch1, Register scratch2,
+ Register result, Label* slow) {
+ DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
+
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
// Check for fast array.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
- not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
-
- // The elements_map register is only used for the not_fast_array path, which
- // was handled above. From this point onward it is a scratch register.
- Register scratch1 = elements_map;
+ __ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Cmp(key, scratch1);
- __ B(hs, slow);
-
+ __ B(lo, &in_bounds);
+
+ // Out of bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ Cmp(key, Operand(Smi::FromInt(0)));
+ __ B(lt, slow); // Negative keys can't take the fast OOB path.
+ __ Bind(&check_prototypes);
+ __ Ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Bind(&check_next_prototype);
+ __ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &return_undefined);
+ __ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+ __ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch2: map of current prototype
+ __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
+ __ B(lo, slow);
+ __ Ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+ __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, slow);
+ __ Tbnz(scratch1, Map::kHasIndexedInterceptor, slow);
+ __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow);
+ __ B(&check_next_prototype);
+
+ __ Bind(&return_undefined);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&in_bounds);
// Fast case: Do the load.
__ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ SmiUntag(scratch2, key);
__ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, &check_prototypes);
// Move the value to the result register.
// 'result' can alias with 'receiver' or 'key' but these two must be
// preserved if we jump to 'slow'.
__ Mov(result, scratch2);
+ __ Bind(&done);
}
@@ -480,7 +494,7 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
__ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
- result, NULL, slow);
+ result, slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
scratch1, scratch2);
__ Ret();
@@ -513,94 +527,33 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
Map::kHasNamedInterceptor, slow);
- // If the receiver is a fast-case object, check the keyed lookup cache.
- // Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
- // We keep the map of the receiver in scratch1.
- Register receiver_map = scratch1;
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
- __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(scratch2, scratch2, mask);
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ Mov(scratch3, cache_keys);
- __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and make scratch3 pointing to the next entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, &try_next_entry);
- __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
- __ Cmp(key, scratch4);
- __ B(eq, &hit_on_nth_entry[i]);
- __ Bind(&try_next_entry);
- }
-
- // Last entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, slow);
- __ Ldr(scratch4, MemOperand(scratch3));
- __ Cmp(key, scratch4);
- __ B(ne, slow);
-
- // Get field offset.
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ Bind(&hit_on_nth_entry[i]);
- __ Mov(scratch3, cache_field_offsets);
- if (i != 0) {
- __ Add(scratch2, scratch2, i);
- }
- __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
- __ Ldrb(scratch5,
- FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
- __ Subs(scratch4, scratch4, scratch5);
- __ B(ge, &property_array_property);
- if (i != 0) {
- __ B(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ Mov(slot, Operand(Smi::FromInt(int_slot)));
}
- // Load in-object property.
- __ Bind(&load_in_object_property);
- __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
- __ Add(scratch5, scratch5, scratch4); // Index from start of object.
- __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
- __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- scratch1, scratch2);
- __ Ret();
-
- // Load property array property.
- __ Bind(&property_array_property);
- __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- scratch1, scratch2);
- __ Ret();
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+ false, receiver, key, scratch1,
+ scratch2, scratch3, scratch4);
+ // Cache miss.
+ KeyedLoadIC::GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it exists.
__ Bind(&probe_dictionary);
@@ -615,7 +568,7 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name;
@@ -639,7 +592,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ Bind(&check_name);
GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
- GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
+ GenerateKeyedLoadWithNameKey(masm, key, receiver, x4, x5, x6, x7, x3, &slow);
__ Bind(&index_name);
__ IndexFromHash(x3, key);
@@ -794,7 +747,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
Label slow;
Label array;
@@ -849,7 +802,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// x0: value
// x1: key
// x2: receiver
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
index a3d0d481fe..08ce4cba21 100644
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -14,14 +14,14 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
ASM_LOCATION("PropertyICCompiler::GenerateRuntimeSetProperty");
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister());
- __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Mov(x10, Smi::FromInt(language_mode));
__ Push(x10);
// Do tail-call to runtime routine.
@@ -33,7 +33,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -59,7 +59,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
}
Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
@@ -67,18 +67,17 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
__ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
Label try_next;
__ B(ne, &try_next);
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ Bind(&number_case);
}
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 7ef1b7ed82..85dc01acf0 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -16,7 +16,8 @@ CallOptimization::CallOptimization(Handle<JSFunction> function) {
Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
- Handle<Map> object_map, HolderLookup* holder_lookup) const {
+ Handle<Map> object_map, HolderLookup* holder_lookup,
+ int* holder_depth_in_prototype_chain) const {
DCHECK(is_simple_api_call());
if (!object_map->IsJSObjectMap()) {
*holder_lookup = kHolderNotFound;
@@ -27,13 +28,16 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
*holder_lookup = kHolderIsReceiver;
return Handle<JSObject>::null();
}
- while (true) {
+ for (int depth = 1; true; depth++) {
if (!object_map->prototype()->IsJSObject()) break;
Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
if (!prototype->map()->is_hidden_prototype()) break;
object_map = handle(prototype->map());
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
*holder_lookup = kHolderFound;
+ if (holder_depth_in_prototype_chain != NULL) {
+ *holder_depth_in_prototype_chain = depth;
+ }
return prototype;
}
}
@@ -45,8 +49,14 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
Handle<JSObject> holder) const {
DCHECK(is_simple_api_call());
- if (!receiver->IsJSObject()) return false;
- Handle<Map> map(JSObject::cast(*receiver)->map());
+ if (!receiver->IsHeapObject()) return false;
+ Handle<Map> map(HeapObject::cast(*receiver)->map());
+ return IsCompatibleReceiverMap(map, holder);
+}
+
+
+bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
+ Handle<JSObject> holder) const {
HolderLookup holder_lookup;
Handle<JSObject> api_holder = LookupHolderOfExpectedType(map, &holder_lookup);
switch (holder_lookup) {
@@ -92,19 +102,11 @@ void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
// Require a C++ callback.
if (info->call_code()->IsUndefined()) return;
- api_call_info_ =
- Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
+ api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()));
- // Accept signatures that either have no restrictions at all or
- // only have restrictions on the receiver.
if (!info->signature()->IsUndefined()) {
- Handle<SignatureInfo> signature =
- Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
- if (!signature->args()->IsUndefined()) return;
- if (!signature->receiver()->IsUndefined()) {
- expected_receiver_type_ = Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(signature->receiver()));
- }
+ expected_receiver_type_ =
+ handle(FunctionTemplateInfo::cast(info->signature()));
}
is_simple_api_call_ = true;
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index 99494fa3ba..01947d7fed 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -38,12 +38,17 @@ class CallOptimization BASE_EMBEDDED {
enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
Handle<JSObject> LookupHolderOfExpectedType(
- Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
+ Handle<Map> receiver_map, HolderLookup* holder_lookup,
+ int* holder_depth_in_prototype_chain = NULL) const;
// Check if the api holder is between the receiver and the holder.
bool IsCompatibleReceiver(Handle<Object> receiver,
Handle<JSObject> holder) const;
+ // Check if the api holder is between the receiver and the holder.
+ bool IsCompatibleReceiverMap(Handle<Map> receiver_map,
+ Handle<JSObject> holder) const;
+
private:
void Initialize(Handle<JSFunction> function);
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index ae977c3915..acf380fbee 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -26,9 +26,8 @@ Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
- Handle<Name> name, Handle<HeapType> type) {
+ Handle<Name> name, Handle<Map> receiver_map) {
Isolate* isolate = name->GetIsolate();
- Handle<Map> receiver_map = IC::TypeToMap(*type, isolate);
if (receiver_map->prototype()->IsNull()) {
// TODO(jkummerow/verwaest): If there is no prototype and the property
// is nonexistent, introduce a builtin to handle this (fast properties
@@ -37,7 +36,7 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
}
CacheHolderFlag flag;
Handle<Map> stub_holder_map =
- IC::GetHandlerCacheHolder(*type, false, isolate, &flag);
+ IC::GetHandlerCacheHolder(receiver_map, false, isolate, &flag);
// If no dictionary mode objects are present in the prototype chain, the load
// nonexistent IC stub can be shared for all names for a given map and we use
@@ -62,7 +61,7 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
if (!handler.is_null()) return handler;
- NamedLoadHandlerCompiler compiler(isolate, type, last, flag);
+ NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
handler = compiler.CompileLoadNonexistent(cache_name);
Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
return handler;
@@ -75,15 +74,13 @@ Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
+#ifdef DEBUG
+ code->VerifyEmbeddedObjects();
+#endif
return code;
}
-void PropertyHandlerCompiler::set_type_for_object(Handle<Object> object) {
- type_ = IC::CurrentTypeOf(object, isolate());
-}
-
-
#define __ ACCESS_MASM(masm())
@@ -92,13 +89,13 @@ Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
Label* miss) {
PrototypeCheckType check_type = CHECK_ALL_MAPS;
int function_index = -1;
- if (type()->Is(HeapType::String())) {
+ if (map()->instance_type() < FIRST_NONSTRING_TYPE) {
function_index = Context::STRING_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Symbol())) {
+ } else if (map()->instance_type() == SYMBOL_TYPE) {
function_index = Context::SYMBOL_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Number())) {
+ } else if (map()->instance_type() == HEAP_NUMBER_TYPE) {
function_index = Context::NUMBER_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Boolean())) {
+ } else if (*map() == isolate()->heap()->boolean_map()) {
function_index = Context::BOOLEAN_FUNCTION_INDEX;
} else {
check_type = SKIP_RECEIVER;
@@ -109,7 +106,8 @@ Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
scratch1(), miss);
Object* function = isolate()->native_context()->get(function_index);
Object* prototype = JSFunction::cast(function)->instance_prototype();
- set_type_for_object(handle(prototype, isolate()));
+ Handle<Map> map(JSObject::cast(prototype)->map());
+ set_map(map);
object_reg = scratch1();
}
@@ -152,7 +150,7 @@ void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
Handle<Map> last_map;
if (holder().is_null()) {
holder_reg = receiver();
- last_map = IC::TypeToMap(*type(), isolate());
+ last_map = map();
// If |type| has null as its prototype, |holder()| is
// Handle<JSObject>::null().
DCHECK(last_map->prototype() == isolate()->heap()->null_value());
@@ -165,7 +163,7 @@ void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
if (last_map->IsJSGlobalObjectMap()) {
Handle<JSGlobalObject> global =
holder().is_null()
- ? Handle<JSGlobalObject>::cast(type()->AsConstant()->Value())
+ ? Handle<JSGlobalObject>::cast(isolate()->global_object())
: Handle<JSGlobalObject>::cast(holder());
GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
} else {
@@ -229,12 +227,12 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, const CallOptimization& call_optimization) {
+ Handle<Name> name, const CallOptimization& call_optimization,
+ int accessor_index) {
DCHECK(call_optimization.is_simple_api_call());
- Frontend(name);
- Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
- GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
- scratch1(), false, 0, NULL);
+ Register holder = Frontend(name);
+ GenerateApiAccessorCall(masm(), call_optimization, map(), receiver(),
+ scratch2(), false, no_reg, holder, accessor_index);
return GetCode(kind(), Code::FAST, name);
}
@@ -270,7 +268,7 @@ void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
LookupIterator* it) {
- // So far the most popular follow ups for interceptor loads are FIELD and
+ // So far the most popular follow ups for interceptor loads are DATA and
// ExecutableAccessorInfo, so inline only them. Other cases may be added
// later.
bool inline_followup = false;
@@ -284,17 +282,29 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
break;
case LookupIterator::DATA:
inline_followup =
- it->property_details().type() == FIELD && !it->is_dictionary_holder();
+ it->property_details().type() == DATA && !it->is_dictionary_holder();
break;
case LookupIterator::ACCESSOR: {
Handle<Object> accessors = it->GetAccessors();
- inline_followup = accessors->IsExecutableAccessorInfo();
- if (!inline_followup) break;
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- inline_followup = info->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(
- isolate(), info, type());
+ if (accessors->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ inline_followup = info->getter() != NULL &&
+ ExecutableAccessorInfo::IsCompatibleReceiverMap(
+ isolate(), info, map());
+ } else if (accessors->IsAccessorPair()) {
+ Handle<JSObject> property_holder(it->GetHolder<JSObject>());
+ Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+ isolate());
+ if (!getter->IsJSFunction()) break;
+ if (!property_holder->HasFastProperties()) break;
+ auto function = Handle<JSFunction>::cast(getter);
+ CallOptimization call_optimization(function);
+ Handle<Map> receiver_map = map();
+ inline_followup = call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiverMap(
+ receiver_map, property_holder);
+ }
}
}
@@ -319,7 +329,8 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
LookupIterator* it, Register interceptor_reg) {
Handle<JSObject> real_named_property_holder(it->GetHolder<JSObject>());
- set_type_for_object(holder());
+ Handle<Map> holder_map(holder()->map());
+ set_map(holder_map);
set_holder(real_named_property_holder);
Label miss;
@@ -337,25 +348,35 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::DATA: {
- DCHECK_EQ(FIELD, it->property_details().type());
+ DCHECK_EQ(DATA, it->property_details().type());
__ Move(receiver(), reg);
LoadFieldStub stub(isolate(), it->GetFieldIndex());
GenerateTailCall(masm(), stub.GetCode());
break;
}
case LookupIterator::ACCESSOR:
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
- DCHECK_NE(NULL, info->getter());
- GenerateLoadCallback(reg, info);
+ if (it->GetAccessors()->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
+ DCHECK_NOT_NULL(info->getter());
+ GenerateLoadCallback(reg, info);
+ } else {
+ auto function = handle(JSFunction::cast(
+ AccessorPair::cast(*it->GetAccessors())->getter()));
+ CallOptimization call_optimization(function);
+ GenerateApiAccessorCall(masm(), call_optimization, holder_map,
+ receiver(), scratch2(), false, no_reg, reg,
+ it->GetAccessorIndex());
+ }
}
}
Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
- Handle<Name> name, Handle<JSFunction> getter) {
- Frontend(name);
- GenerateLoadViaGetter(masm(), type(), receiver(), getter);
+ Handle<Name> name, int accessor_index, int expected_arguments) {
+ Register holder = Frontend(name);
+ GenerateLoadViaGetter(masm(), map(), receiver(), holder, accessor_index,
+ expected_arguments, scratch2());
return GetCode(kind(), Code::FAST, name);
}
@@ -392,7 +413,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
- if (details.type() == CONSTANT) {
+ if (details.type() == DATA_CONSTANT) {
GenerateRestoreMap(transition, scratch2(), &miss);
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
Register map_reg = StoreTransitionDescriptor::MapRegister();
@@ -441,9 +462,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
- Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
- Frontend(name);
- GenerateStoreViaSetter(masm(), type(), receiver(), setter);
+ Handle<JSObject> object, Handle<Name> name, int accessor_index,
+ int expected_arguments) {
+ Register holder = Frontend(name);
+ GenerateStoreViaSetter(masm(), map(), receiver(), holder, accessor_index,
+ expected_arguments, scratch2());
return GetCode(kind(), Code::FAST, name);
}
@@ -451,11 +474,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization) {
- Frontend(name);
- Register values[] = {value()};
- GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), true, 1, values);
+ const CallOptimization& call_optimization, int accessor_index) {
+ Register holder = Frontend(name);
+ GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
+ receiver(), scratch2(), true, value(), holder,
+ accessor_index);
return GetCode(kind(), Code::FAST, name);
}
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index bed65775c8..bd3f788e38 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -21,11 +21,10 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
CacheHolderFlag cache_holder, Code::StubType type);
protected:
- PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind,
- Handle<HeapType> type, Handle<JSObject> holder,
- CacheHolderFlag cache_holder)
+ PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind, Handle<Map> map,
+ Handle<JSObject> holder, CacheHolderFlag cache_holder)
: PropertyAccessCompiler(isolate, kind, cache_holder),
- type_(type),
+ map_(map),
holder_(holder) {}
virtual ~PropertyHandlerCompiler() {}
@@ -54,11 +53,13 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
void DiscardVectorAndSlot();
// TODO(verwaest): Make non-static.
- static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver,
- Register scratch, bool is_store, int argc,
- Register* values);
+ static void GenerateApiAccessorCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver, Register scratch,
+ bool is_store, Register store_parameter,
+ Register accessor_holder,
+ int accessor_index);
// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so miss_label
@@ -97,23 +98,23 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
PrototypeCheckType check = CHECK_ALL_MAPS);
Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
- void set_type_for_object(Handle<Object> object);
void set_holder(Handle<JSObject> holder) { holder_ = holder; }
- Handle<HeapType> type() const { return type_; }
+ Handle<Map> map() const { return map_; }
+ void set_map(Handle<Map> map) { map_ = map; }
Handle<JSObject> holder() const { return holder_; }
private:
- Handle<HeapType> type_;
+ Handle<Map> map_;
Handle<JSObject> holder_;
};
class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
public:
- NamedLoadHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+ NamedLoadHandlerCompiler(Isolate* isolate, Handle<Map> map,
Handle<JSObject> holder,
CacheHolderFlag cache_holder)
- : PropertyHandlerCompiler(isolate, Code::LOAD_IC, type, holder,
+ : PropertyHandlerCompiler(isolate, Code::LOAD_IC, map, holder,
cache_holder) {}
virtual ~NamedLoadHandlerCompiler() {}
@@ -124,7 +125,8 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Handle<ExecutableAccessorInfo> callback);
Handle<Code> CompileLoadCallback(Handle<Name> name,
- const CallOptimization& call_optimization);
+ const CallOptimization& call_optimization,
+ int accessor_index);
Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
@@ -133,23 +135,24 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
// inlined.
Handle<Code> CompileLoadInterceptor(LookupIterator* it);
- Handle<Code> CompileLoadViaGetter(Handle<Name> name,
- Handle<JSFunction> getter);
+ Handle<Code> CompileLoadViaGetter(Handle<Name> name, int accessor_index,
+ int expected_arguments);
Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
bool is_configurable);
// Static interface
static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
- Handle<HeapType> type);
+ Handle<Map> map);
- static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<HeapType> type,
- Register receiver,
- Handle<JSFunction> getter);
+ static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<Map> map,
+ Register receiver, Register holder,
+ int accessor_index, int expected_arguments,
+ Register scratch);
static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
- GenerateLoadViaGetter(masm, Handle<HeapType>::null(), no_reg,
- Handle<JSFunction>());
+ GenerateLoadViaGetter(masm, Handle<Map>::null(), no_reg, no_reg, -1, -1,
+ no_reg);
}
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
@@ -163,10 +166,9 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
// PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
// LoadWithInterceptor.
static const int kInterceptorArgsNameIndex = 0;
- static const int kInterceptorArgsInfoIndex = 1;
- static const int kInterceptorArgsThisIndex = 2;
- static const int kInterceptorArgsHolderIndex = 3;
- static const int kInterceptorArgsLength = 4;
+ static const int kInterceptorArgsThisIndex = 1;
+ static const int kInterceptorArgsHolderIndex = 2;
+ static const int kInterceptorArgsLength = 3;
protected:
virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
@@ -210,9 +212,9 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
public:
- explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+ explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<Map> map,
Handle<JSObject> holder)
- : PropertyHandlerCompiler(isolate, Code::STORE_IC, type, holder,
+ : PropertyHandlerCompiler(isolate, Code::STORE_IC, map, holder,
kCacheOnReceiver) {}
virtual ~NamedStoreHandlerCompiler() {}
@@ -223,18 +225,21 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization);
+ const CallOptimization& call_optimization,
+ int accessor_index);
Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
- Handle<JSFunction> setter);
+ int accessor_index,
+ int expected_arguments);
Handle<Code> CompileStoreInterceptor(Handle<Name> name);
- static void GenerateStoreViaSetter(MacroAssembler* masm,
- Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter);
+ static void GenerateStoreViaSetter(MacroAssembler* masm, Handle<Map> map,
+ Register receiver, Register holder,
+ int accessor_index, int expected_arguments,
+ Register scratch);
static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
- GenerateStoreViaSetter(masm, Handle<HeapType>::null(), no_reg,
- Handle<JSFunction>());
+ GenerateStoreViaSetter(masm, Handle<Map>::null(), no_reg, no_reg, -1, -1,
+ no_reg);
}
static void GenerateSlow(MacroAssembler* masm);
@@ -278,8 +283,8 @@ class ElementHandlerCompiler : public PropertyHandlerCompiler {
public:
explicit ElementHandlerCompiler(Isolate* isolate)
: PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC,
- Handle<HeapType>::null(),
- Handle<JSObject>::null(), kCacheOnReceiver) {}
+ Handle<Map>::null(), Handle<JSObject>::null(),
+ kCacheOnReceiver) {}
virtual ~ElementHandlerCompiler() {}
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 90512e9bc8..94b48be274 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -17,22 +17,26 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ mov(receiver,
+ __ mov(scratch,
FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
@@ -137,42 +141,53 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should will be removed
// when api call ICs are generated in hydrogen.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ Handle<Map> receiver_map, Register receiver, Register scratch,
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch));
// Copy return value.
- __ pop(scratch_in);
+ __ pop(scratch);
// receiver
__ push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ push(arg);
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch.is(store_parameter));
+ __ push(store_parameter);
}
- __ push(scratch_in);
+ __ push(scratch);
// Stack now matches JSFunction abi.
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
- Register callee = eax;
- Register call_data = ebx;
+ Register callee = edi;
+ Register data = ebx;
Register holder = ecx;
Register api_function_address = edx;
- Register scratch = edi; // scratch_in is no longer valid.
+ scratch = no_reg;
+
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ LoadHeapObject(holder, api_holder);
+ __ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ mov(holder, FieldOperand(holder, HeapObject::kMapOffset));
+ __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -180,23 +195,17 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadHeapObject(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ mov(scratch, api_call_info);
- __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+ __ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
- __ mov(call_data, call_data_obj);
+ __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
@@ -204,7 +213,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
@@ -217,21 +226,18 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (masm->serializer_enabled()) {
- __ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(the_hole));
- } else {
- __ cmp(Operand::ForCell(cell), Immediate(the_hole));
- }
+ Factory* factory = masm->isolate()->factory();
+ Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(factory->the_hole_value()));
__ j(not_equal, miss);
}
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -----------------------------------
@@ -241,18 +247,22 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Save value register, so we can restore it later.
__ push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
+ if (map->IsJSGlobalObjectMap()) {
+ __ mov(scratch,
FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
__ push(value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
@@ -274,16 +284,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(interceptor));
- __ push(scratch);
__ push(receiver);
__ push(holder);
}
@@ -385,12 +389,17 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
+ __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
while (true) {
- __ CompareMap(value_reg, it.Current());
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
@@ -407,7 +416,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -419,8 +428,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant())
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -547,12 +557,17 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
__ push(receiver()); // receiver
// Push data from ExecutableAccessorInfo.
- if (isolate()->heap()->InNewSpace(callback->data())) {
- DCHECK(!scratch2().is(reg));
- __ mov(scratch2(), Immediate(callback));
- __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ push(Immediate(data));
} else {
- __ push(Immediate(Handle<Object>(callback->data(), isolate())));
+ DCHECK(!scratch2().is(reg));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch2(), cell);
+ __ push(scratch2());
}
__ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
// ReturnValue default value
@@ -677,7 +692,14 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ pop(scratch1()); // remove the return address
__ push(receiver());
__ push(holder_reg);
- __ Push(callback);
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ Push(callback);
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ Push(cell);
+ }
__ Push(name);
__ push(value());
__ push(scratch1()); // restore return address
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index f43b641134..6788bc7a88 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -15,8 +15,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
// Return address is on the stack.
DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
!ebx.is(StoreDescriptor::NameRegister()) &&
@@ -25,7 +25,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
__ push(StoreDescriptor::ReceiverRegister());
__ push(StoreDescriptor::NameRegister());
__ push(StoreDescriptor::ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(Immediate(Smi::FromInt(language_mode)));
__ push(ebx); // return address
// Do tail-call to runtime routine.
@@ -36,7 +36,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
#undef __
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -63,7 +63,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
}
Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
@@ -71,16 +71,15 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 9822f26ced..54fd053eaf 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -169,40 +169,65 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
- Register result, Label* not_fast_array,
- Label* out_of_range) {
+ Register scratch2, Register result,
+ Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
// Scratch registers:
// scratch - used to hold elements of the receiver and the loaded value.
+ // scratch2 - holds maps and prototypes during prototype chain check.
// result - holds the result on exit if the load succeeds and
// we fall through.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CheckMap(scratch, masm->isolate()->factory()->fixed_array_map(),
- not_fast_array, DONT_DO_SMI_CHECK);
- } else {
- __ AssertFastElements(scratch);
- }
+ __ AssertFastElements(scratch);
+
// Check that the key (index) is within bounds.
__ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(above_equal, out_of_range);
+ __ j(below, &in_bounds);
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ cmp(key, 0);
+ __ j(less, slow); // Negative keys can't take the fast OOB path.
+ __ bind(&check_prototypes);
+ __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ cmp(scratch2, masm->isolate()->factory()->null_value());
+ __ j(equal, &return_undefined);
+ __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
+ __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
+ // scratch: elements of current prototype
+ // scratch2: map of current prototype
+ __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
+ __ j(below, slow);
+ __ test_b(
+ FieldOperand(scratch2, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, slow);
+ __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
+ __ j(not_equal, slow);
+ __ jmp(&check_next_prototype);
+
+ __ bind(&return_undefined);
+ __ mov(result, masm->isolate()->factory()->undefined_value());
+ __ jmp(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ mov(result, scratch);
- }
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ j(equal, &check_prototypes);
+ __ Move(result, scratch);
+ __ bind(&done);
}
@@ -305,7 +330,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -327,7 +352,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
@@ -367,95 +392,36 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
&slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
- // The receiver's map is still in eax, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- if (FLAG_debug_code) {
- __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset));
- __ Check(equal, kMapIsNoLongerInEax);
- }
- __ mov(ebx, eax); // Keep the map around for later.
- __ shr(eax, KeyedLookupCache::kMapHashShift);
- __ mov(edi, FieldOperand(key, String::kHashFieldOffset));
- __ shr(edi, String::kHashShift);
- __ xor_(eax, edi);
- __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ mov(edi, eax);
- __ shl(edi, kPointerSizeLog2 + 1);
- if (i != 0) {
- __ add(edi, Immediate(kPointerSize * i * 2));
- }
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &try_next_entry);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ isolate->factory()->keyed_load_dummy_vector());
+ int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
}
- __ lea(edi, Operand(eax, 1));
- __ shl(edi, kPointerSizeLog2 + 1);
- __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+ false, receiver, key, ebx, edi);
- // Get field offset.
- // ebx : receiver's map
- // eax : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ add(eax, Immediate(i));
- }
- __ mov(edi,
- Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
- __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, eax);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ __ pop(VectorLoadICDescriptor::SlotRegister());
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, edi);
- __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(eax,
- FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
@@ -646,7 +612,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// Return address is on the stack.
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -683,7 +649,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index e087acfcb0..08e0fa6e5a 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -25,30 +25,30 @@ Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
}
-bool PropertyICCompiler::IncludesNumberType(TypeHandleList* types) {
- for (int i = 0; i < types->length(); ++i) {
- if (types->at(i)->Is(HeapType::Number())) return true;
+bool PropertyICCompiler::IncludesNumberMap(MapHandleList* maps) {
+ for (int i = 0; i < maps->length(); ++i) {
+ if (maps->at(i)->instance_type() == HEAP_NUMBER_TYPE) return true;
}
return false;
}
-Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<HeapType> type,
+Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<Map> map,
Handle<Code> handler,
Handle<Name> name,
IcCheckType check) {
- TypeHandleList types(1);
+ MapHandleList maps(1);
CodeHandleList handlers(1);
- types.Add(type);
+ maps.Add(map);
handlers.Add(handler);
Code::StubType stub_type = handler->type();
- return CompilePolymorphic(&types, &handlers, name, stub_type, check);
+ return CompilePolymorphic(&maps, &handlers, name, stub_type, check);
}
Handle<Code> PropertyICCompiler::ComputeMonomorphic(
- Code::Kind kind, Handle<Name> name, Handle<HeapType> type,
- Handle<Code> handler, ExtraICState extra_ic_state) {
+ Code::Kind kind, Handle<Name> name, Handle<Map> map, Handle<Code> handler,
+ ExtraICState extra_ic_state) {
Isolate* isolate = name->GetIsolate();
if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
@@ -56,7 +56,7 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
}
CacheHolderFlag flag;
- Handle<Map> stub_holder = IC::GetICCacheHolder(*type, isolate, &flag);
+ Handle<Map> stub_holder = IC::GetICCacheHolder(map, isolate, &flag);
if (kind == Code::KEYED_STORE_IC) {
// Always set the "property" bit.
extra_ic_state =
@@ -72,14 +72,14 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
// There are multiple string maps that all use the same prototype. That
// prototype cannot hold multiple handlers, one for each of the string maps,
// for a single name. Hence, turn off caching of the IC.
- bool can_be_cached = !type->Is(HeapType::String());
+ bool can_be_cached = map->instance_type() >= FIRST_NONSTRING_TYPE;
if (can_be_cached) {
ic = Find(name, stub_holder, kind, extra_ic_state, flag);
if (!ic.is_null()) return ic;
}
PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
- ic = ic_compiler.CompileMonomorphic(type, handler, name, PROPERTY);
+ ic = ic_compiler.CompileMonomorphic(map, handler, name, PROPERTY);
if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
return ic;
@@ -98,9 +98,8 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
Handle<Code> stub = ComputeKeyedLoadMonomorphicHandler(receiver_map);
PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code =
- compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
- isolate->factory()->empty_string(), ELEMENT);
+ Handle<Code> code = compiler.CompileMonomorphic(
+ receiver_map, stub, isolate->factory()->empty_string(), ELEMENT);
Map::UpdateCodeCache(receiver_map, name, code);
return code;
@@ -133,11 +132,11 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, StrictMode strict_mode,
+ Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode) {
Isolate* isolate = receiver_map->GetIsolate();
ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+ KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
@@ -256,7 +255,6 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
}
-// TODO(verwaest): Change this method so it takes in a TypeHandleList.
Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
MapHandleList* receiver_maps) {
Isolate* isolate = receiver_maps->at(0)->GetIsolate();
@@ -267,17 +265,13 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
Handle<Object> probe = cache->Lookup(receiver_maps, flags);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- TypeHandleList types(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); i++) {
- types.Add(HeapType::Class(receiver_maps->at(i), isolate));
- }
CodeHandleList handlers(receiver_maps->length());
ElementHandlerCompiler compiler(isolate);
compiler.CompileElementHandlers(receiver_maps, &handlers);
PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
Handle<Code> code = ic_compiler.CompilePolymorphic(
- &types, &handlers, isolate->factory()->empty_string(), Code::NORMAL,
- ELEMENT);
+ receiver_maps, &handlers, isolate->factory()->empty_string(),
+ Code::NORMAL, ELEMENT);
isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
@@ -287,19 +281,19 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
Handle<Code> PropertyICCompiler::ComputePolymorphic(
- Code::Kind kind, TypeHandleList* types, CodeHandleList* handlers,
- int valid_types, Handle<Name> name, ExtraICState extra_ic_state) {
+ Code::Kind kind, MapHandleList* maps, CodeHandleList* handlers,
+ int valid_maps, Handle<Name> name, ExtraICState extra_ic_state) {
Handle<Code> handler = handlers->at(0);
- Code::StubType type = valid_types == 1 ? handler->type() : Code::NORMAL;
+ Code::StubType type = valid_maps == 1 ? handler->type() : Code::NORMAL;
DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
- return ic_compiler.CompilePolymorphic(types, handlers, name, type, PROPERTY);
+ return ic_compiler.CompilePolymorphic(maps, handlers, name, type, PROPERTY);
}
Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
Isolate* isolate = receiver_maps->at(0)->GetIsolate();
DCHECK(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
@@ -308,7 +302,7 @@ Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
Handle<PolymorphicCodeCache> cache =
isolate->factory()->polymorphic_code_cache();
ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+ KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
Code::Flags flags =
Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
Handle<Object> probe = cache->Lookup(receiver_maps, flags);
@@ -358,8 +352,8 @@ Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
- GenerateRuntimeSetProperty(masm(), strict_mode);
+ LanguageMode language_mode = StoreIC::GetLanguageMode(extra_state);
+ GenerateRuntimeSetProperty(masm(), language_mode);
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
return code;
@@ -381,6 +375,9 @@ Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+#ifdef DEBUG
+ code->VerifyEmbeddedObjects();
+#endif
return code;
}
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index dd898aeed3..d1bd7a1dfc 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -24,10 +24,9 @@ class PropertyICCompiler : public PropertyAccessCompiler {
ExtraICState extra_state);
static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
- Handle<HeapType> type,
- Handle<Code> handler,
+ Handle<Map> map, Handle<Code> handler,
ExtraICState extra_ic_state);
- static Handle<Code> ComputePolymorphic(Code::Kind kind, TypeHandleList* types,
+ static Handle<Code> ComputePolymorphic(Code::Kind kind, MapHandleList* maps,
CodeHandleList* handlers,
int number_of_valid_maps,
Handle<Name> name,
@@ -39,12 +38,12 @@ class PropertyICCompiler : public PropertyAccessCompiler {
static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
static Handle<Code> ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, StrictMode strict_mode,
+ Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps);
static Handle<Code> ComputeKeyedStorePolymorphic(
MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- StrictMode strict_mode);
+ LanguageMode language_mode);
// Compare nil
static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
@@ -54,7 +53,7 @@ class PropertyICCompiler : public PropertyAccessCompiler {
// TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
// and make the helpers private.
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode);
+ LanguageMode language_mode);
private:
@@ -76,11 +75,11 @@ class PropertyICCompiler : public PropertyAccessCompiler {
Handle<Code> CompileStoreGeneric(Code::Flags flags);
Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
- Handle<Code> CompileMonomorphic(Handle<HeapType> type, Handle<Code> handler,
+ Handle<Code> CompileMonomorphic(Handle<Map> map, Handle<Code> handler,
Handle<Name> name, IcCheckType check);
- Handle<Code> CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers, Handle<Name> name,
- Code::StubType type, IcCheckType check);
+ Handle<Code> CompilePolymorphic(MapHandleList* maps, CodeHandleList* handlers,
+ Handle<Name> name, Code::StubType type,
+ IcCheckType check);
Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
@@ -90,7 +89,7 @@ class PropertyICCompiler : public PropertyAccessCompiler {
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps);
- bool IncludesNumberType(TypeHandleList* types);
+ bool IncludesNumberMap(MapHandleList* maps);
Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
InlineCacheState state = MONOMORPHIC);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 58d7d4608f..45dd3476cf 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -106,11 +106,11 @@ void IC::SetTargetAtAddress(Address address, Code* target,
Code* old_target = GetTargetAtAddress(address, constant_pool);
#ifdef DEBUG
// STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
- // ICs as strict mode. The strict-ness of the IC must be preserved.
+ // ICs as language mode. The language mode of the IC must be preserved.
if (old_target->kind() == Code::STORE_IC ||
old_target->kind() == Code::KEYED_STORE_IC) {
- DCHECK(StoreIC::GetStrictMode(old_target->extra_ic_state()) ==
- StoreIC::GetStrictMode(target->extra_ic_state()));
+ DCHECK(StoreIC::GetLanguageMode(old_target->extra_ic_state()) ==
+ StoreIC::GetLanguageMode(target->extra_ic_state()));
}
#endif
Assembler::set_target_address_at(address, constant_pool,
@@ -140,16 +140,16 @@ void LoadIC::set_target(Code* code) {
void StoreIC::set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- DCHECK(GetStrictMode(code->extra_ic_state()) ==
- GetStrictMode(target()->extra_ic_state()));
+ // Language mode must be preserved across IC patching.
+ DCHECK(GetLanguageMode(code->extra_ic_state()) ==
+ GetLanguageMode(target()->extra_ic_state()));
IC::set_target(code);
}
void KeyedStoreIC::set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode());
+ // Language mode must be preserved across IC patching.
+ DCHECK(GetLanguageMode(code->extra_ic_state()) == language_mode());
IC::set_target(code);
}
@@ -161,15 +161,15 @@ Code* IC::raw_target() const {
void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
-template <class TypeClass>
-JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) {
- if (type->Is(TypeClass::Boolean())) {
+JSFunction* IC::GetRootConstructor(Map* receiver_map, Context* native_context) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ if (receiver_map == isolate->heap()->boolean_map()) {
return native_context->boolean_function();
- } else if (type->Is(TypeClass::Number())) {
+ } else if (receiver_map->instance_type() == HEAP_NUMBER_TYPE) {
return native_context->number_function();
- } else if (type->Is(TypeClass::String())) {
+ } else if (receiver_map->instance_type() < FIRST_NONSTRING_TYPE) {
return native_context->string_function();
- } else if (type->Is(TypeClass::Symbol())) {
+ } else if (receiver_map->instance_type() == SYMBOL_TYPE) {
return native_context->symbol_function();
} else {
return NULL;
@@ -177,15 +177,15 @@ JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) {
}
-Handle<Map> IC::GetHandlerCacheHolder(HeapType* type, bool receiver_is_holder,
- Isolate* isolate, CacheHolderFlag* flag) {
- Handle<Map> receiver_map = TypeToMap(type, isolate);
+Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
+ bool receiver_is_holder, Isolate* isolate,
+ CacheHolderFlag* flag) {
if (receiver_is_holder) {
*flag = kCacheOnReceiver;
return receiver_map;
}
Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
+ JSFunction* builtin_ctor = GetRootConstructor(*receiver_map, native_context);
if (builtin_ctor != NULL) {
*flag = kCacheOnPrototypeReceiverIsPrimitive;
return handle(HeapObject::cast(builtin_ctor->instance_prototype())->map());
@@ -198,16 +198,16 @@ Handle<Map> IC::GetHandlerCacheHolder(HeapType* type, bool receiver_is_holder,
}
-Handle<Map> IC::GetICCacheHolder(HeapType* type, Isolate* isolate,
+Handle<Map> IC::GetICCacheHolder(Handle<Map> map, Isolate* isolate,
CacheHolderFlag* flag) {
Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
+ JSFunction* builtin_ctor = GetRootConstructor(*map, native_context);
if (builtin_ctor != NULL) {
*flag = kCacheOnPrototype;
return handle(builtin_ctor->initial_map());
}
*flag = kCacheOnReceiver;
- return TypeToMap(type, isolate);
+ return map;
}
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 9c883ad5e3..a38a27a34a 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -36,11 +36,18 @@ std::ostream& operator<<(std::ostream& os, const CallICState& s) {
}
+// static
+STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::FIRST_TOKEN;
+
+
+// static
+STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::LAST_TOKEN;
+
+
BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
: isolate_(isolate) {
op_ =
static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
- mode_ = OverwriteModeField::decode(extra_ic_state);
fixed_right_arg_ =
Maybe<int>(HasFixedRightArgField::decode(extra_ic_state),
1 << FixedRightArgValueField::decode(extra_ic_state));
@@ -58,8 +65,7 @@ BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
ExtraICState BinaryOpICState::GetExtraICState() const {
ExtraICState extra_ic_state =
- OpField::encode(op_ - FIRST_TOKEN) | OverwriteModeField::encode(mode_) |
- LeftKindField::encode(left_kind_) |
+ OpField::encode(op_ - FIRST_TOKEN) | LeftKindField::encode(left_kind_) |
ResultKindField::encode(result_kind_) |
HasFixedRightArgField::encode(fixed_right_arg_.has_value);
if (fixed_right_arg_.has_value) {
@@ -79,218 +85,124 @@ void BinaryOpICState::GenerateAheadOfTime(
// expensive at runtime. When solved we should be able to add most binops to
// the snapshot instead of hand-picking them.
// Generated list of commonly used stubs
-#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
- do { \
- BinaryOpICState state(isolate, op, mode); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = false; \
- state.right_kind_ = right_kind; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
+#define GENERATE(op, left_kind, right_kind, result_kind) \
+ do { \
+ BinaryOpICState state(isolate, op); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = false; \
+ state.right_kind_ = right_kind; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
} while (false)
- GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, INT32, INT32, INT32);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER);
+ GENERATE(Token::ADD, INT32, SMI, INT32);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER);
+ GENERATE(Token::ADD, SMI, INT32, INT32);
+ GENERATE(Token::ADD, SMI, INT32, NUMBER);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER);
+ GENERATE(Token::ADD, SMI, SMI, INT32);
+ GENERATE(Token::ADD, SMI, SMI, SMI);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI);
+ GENERATE(Token::BIT_AND, NUMBER, INT32, INT32);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI);
+ GENERATE(Token::BIT_AND, SMI, INT32, INT32);
+ GENERATE(Token::BIT_AND, SMI, INT32, SMI);
+ GENERATE(Token::BIT_AND, SMI, NUMBER, SMI);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32);
+ GENERATE(Token::BIT_OR, INT32, INT32, SMI);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32);
+ GENERATE(Token::BIT_OR, SMI, INT32, SMI);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI);
+ GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32);
+ GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32);
+ GENERATE(Token::BIT_XOR, SMI, INT32, SMI);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI);
+ GENERATE(Token::DIV, INT32, INT32, INT32);
+ GENERATE(Token::DIV, INT32, INT32, NUMBER);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER);
+ GENERATE(Token::DIV, INT32, SMI, INT32);
+ GENERATE(Token::DIV, INT32, SMI, NUMBER);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER);
+ GENERATE(Token::DIV, SMI, INT32, INT32);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER);
+ GENERATE(Token::DIV, SMI, SMI, SMI);
+ GENERATE(Token::MOD, NUMBER, SMI, NUMBER);
+ GENERATE(Token::MOD, SMI, SMI, SMI);
+ GENERATE(Token::MUL, INT32, INT32, INT32);
+ GENERATE(Token::MUL, INT32, INT32, NUMBER);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER);
+ GENERATE(Token::MUL, INT32, SMI, INT32);
+ GENERATE(Token::MUL, INT32, SMI, NUMBER);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER);
+ GENERATE(Token::MUL, SMI, INT32, INT32);
+ GENERATE(Token::MUL, SMI, INT32, NUMBER);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER);
+ GENERATE(Token::MUL, SMI, SMI, INT32);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER);
+ GENERATE(Token::MUL, SMI, SMI, SMI);
+ GENERATE(Token::SAR, INT32, SMI, INT32);
+ GENERATE(Token::SAR, INT32, SMI, SMI);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI);
+ GENERATE(Token::SAR, SMI, SMI, SMI);
+ GENERATE(Token::SHL, INT32, SMI, INT32);
+ GENERATE(Token::SHL, INT32, SMI, SMI);
+ GENERATE(Token::SHL, NUMBER, SMI, SMI);
+ GENERATE(Token::SHL, SMI, SMI, INT32);
+ GENERATE(Token::SHL, SMI, SMI, SMI);
+ GENERATE(Token::SHR, INT32, SMI, SMI);
+ GENERATE(Token::SHR, NUMBER, SMI, INT32);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI);
+ GENERATE(Token::SHR, SMI, SMI, SMI);
+ GENERATE(Token::SUB, INT32, INT32, INT32);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER);
+ GENERATE(Token::SUB, INT32, SMI, INT32);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER);
+ GENERATE(Token::SUB, SMI, INT32, INT32);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER);
+ GENERATE(Token::SUB, SMI, SMI, SMI);
#undef GENERATE
-#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
- do { \
- BinaryOpICState state(isolate, op, mode); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = true; \
- state.fixed_right_arg_.value = fixed_right_arg_value; \
- state.right_kind_ = SMI; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
+#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind) \
+ do { \
+ BinaryOpICState state(isolate, op); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = true; \
+ state.fixed_right_arg_.value = fixed_right_arg_value; \
+ state.right_kind_ = SMI; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
} while (false)
- GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 2, SMI);
+ GENERATE(Token::MOD, SMI, 4, SMI);
+ GENERATE(Token::MOD, SMI, 8, SMI);
+ GENERATE(Token::MOD, SMI, 16, SMI);
+ GENERATE(Token::MOD, SMI, 32, SMI);
+ GENERATE(Token::MOD, SMI, 2048, SMI);
#undef GENERATE
}
@@ -311,10 +223,6 @@ Type* BinaryOpICState::GetResultType(Zone* zone) const {
std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s) {
os << "(" << Token::Name(s.op_);
- if (s.mode_ == OVERWRITE_LEFT)
- os << "_ReuseLeft";
- else if (s.mode_ == OVERWRITE_RIGHT)
- os << "_ReuseRight";
if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
if (s.fixed_right_arg_.has_value) {
@@ -364,14 +272,6 @@ void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
left_kind_ = NUMBER;
}
- // Reset overwrite mode unless we can actually make use of it, or may be able
- // to make use of it at some point in the future.
- if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
- (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
- result_kind_ > NUMBER) {
- mode_ = NO_OVERWRITE;
- }
-
if (old_extra_ic_state == GetExtraICState()) {
// Tagged operations can lead to non-truncating HChanges
if (left->IsUndefined() || left->IsBoolean()) {
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 72fc865c68..b5f58ed211 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -54,16 +54,12 @@ class CallICState FINAL BASE_EMBEDDED {
std::ostream& operator<<(std::ostream& os, const CallICState& s);
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-
class BinaryOpICState FINAL BASE_EMBEDDED {
public:
BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
- BinaryOpICState(Isolate* isolate, Token::Value op, OverwriteMode mode)
+ BinaryOpICState(Isolate* isolate, Token::Value op)
: op_(op),
- mode_(mode),
left_kind_(NONE),
right_kind_(NONE),
result_kind_(NONE),
@@ -91,14 +87,6 @@ class BinaryOpICState FINAL BASE_EMBEDDED {
void (*Generate)(Isolate*,
const BinaryOpICState&));
- bool CanReuseDoubleBox() const {
- return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
- ((mode_ == OVERWRITE_LEFT && left_kind_ > SMI &&
- left_kind_ <= NUMBER) ||
- (mode_ == OVERWRITE_RIGHT && right_kind_ > SMI &&
- right_kind_ <= NUMBER));
- }
-
// Returns true if the IC _could_ create allocation mementos.
bool CouldCreateAllocationMementos() const {
if (left_kind_ == STRING || right_kind_ == STRING) {
@@ -127,7 +115,6 @@ class BinaryOpICState FINAL BASE_EMBEDDED {
static const int LAST_TOKEN = Token::MOD;
Token::Value op() const { return op_; }
- OverwriteMode mode() const { return mode_; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); }
@@ -154,17 +141,15 @@ class BinaryOpICState FINAL BASE_EMBEDDED {
// We truncate the last bit of the token.
STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
class OpField : public BitField<int, 0, 4> {};
- class OverwriteModeField : public BitField<OverwriteMode, 4, 2> {};
- class ResultKindField : public BitField<Kind, 6, 3> {};
- class LeftKindField : public BitField<Kind, 9, 3> {};
+ class ResultKindField : public BitField<Kind, 4, 3> {};
+ class LeftKindField : public BitField<Kind, 7, 3> {};
// When fixed right arg is set, we don't need to store the right kind.
// Thus the two fields can overlap.
- class HasFixedRightArgField : public BitField<bool, 12, 1> {};
- class FixedRightArgValueField : public BitField<int, 13, 4> {};
- class RightKindField : public BitField<Kind, 13, 3> {};
+ class HasFixedRightArgField : public BitField<bool, 10, 1> {};
+ class FixedRightArgValueField : public BitField<int, 11, 4> {};
+ class RightKindField : public BitField<Kind, 11, 3> {};
Token::Value op_;
- OverwriteMode mode_;
Kind left_kind_;
Kind right_kind_;
Kind result_kind_;
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 48cef68f5a..93f33cf663 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -142,6 +142,7 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus,
bool for_queries_only)
: isolate_(isolate),
target_set_(false),
+ vector_set_(false),
target_maps_set_(false),
nexus_(nexus) {
// To improve the performance of the (much used) IC code, we unfold a few
@@ -222,6 +223,13 @@ Code* IC::GetOriginalCode() const {
}
+bool IC::AddressIsOptimizedCode() const {
+ Code* host =
+ isolate()->inner_pointer_to_code_cache()->GetCacheEntry(address())->code;
+ return host->kind() == Code::OPTIMIZED_FUNCTION;
+}
+
+
static void LookupForRead(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -257,11 +265,10 @@ static void LookupForRead(LookupIterator* it) {
bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
Handle<String> name) {
if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
- Handle<Map> receiver_map = TypeToMap(*receiver_type(), isolate());
if (UseVector()) {
- maybe_handler_ = nexus()->FindHandlerForMap(receiver_map);
+ maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
} else {
- maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
+ maybe_handler_ = target()->FindHandlerForMap(*receiver_map());
}
// The current map wasn't handled yet. There's no reason to stay monomorphic,
@@ -270,21 +277,20 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
// TODO(verwaest): Check if the current map is actually what the old map
// would transition to.
if (maybe_handler_.is_null()) {
- if (!receiver_map->IsJSObjectMap()) return false;
+ if (!receiver_map()->IsJSObjectMap()) return false;
Map* first_map = FirstTargetMap();
if (first_map == NULL) return false;
Handle<Map> old_map(first_map);
if (old_map->is_deprecated()) return true;
if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
- receiver_map->elements_kind())) {
+ receiver_map()->elements_kind())) {
return true;
}
return false;
}
CacheHolderFlag flag;
- Handle<Map> ic_holder_map(
- GetICCacheHolder(*receiver_type(), isolate(), &flag));
+ Handle<Map> ic_holder_map(GetICCacheHolder(receiver_map(), isolate(), &flag));
DCHECK(flag != kCacheOnReceiver || receiver->IsJSObject());
DCHECK(flag != kCacheOnPrototype || !receiver->IsJSReceiver());
@@ -324,7 +330,7 @@ bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
- update_receiver_type(receiver);
+ update_receiver_map(receiver);
if (!name->IsString()) return;
if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
if (receiver->IsUndefined() || receiver->IsNull()) return;
@@ -576,7 +582,7 @@ void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
if (IsCleared(target)) return;
SetTargetAtAddress(
address, *pre_monomorphic_stub(
- isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
+ isolate, StoreIC::GetLanguageMode(target->extra_ic_state())),
constant_pool);
}
@@ -594,11 +600,11 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
// static
-Handle<Code> KeyedLoadIC::generic_stub(Isolate* isolate) {
+Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate) {
if (FLAG_compiled_keyed_generic_loads) {
return KeyedLoadGenericStub(isolate).GetCode();
} else {
- return isolate->builtins()->KeyedLoadIC_Generic();
+ return isolate->builtins()->KeyedLoadIC_Megamorphic();
}
}
@@ -625,10 +631,10 @@ void IC::ConfigureVectorState(IC::State new_state) {
}
} else if (kind() == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
- if (new_state == GENERIC) {
- nexus->ConfigureGeneric();
- } else if (new_state == PREMONOMORPHIC) {
+ if (new_state == PREMONOMORPHIC) {
nexus->ConfigurePremonomorphic();
+ } else if (new_state == MEGAMORPHIC) {
+ nexus->ConfigureMegamorphic();
} else {
UNREACHABLE();
}
@@ -636,40 +642,43 @@ void IC::ConfigureVectorState(IC::State new_state) {
UNREACHABLE();
}
+ vector_set_ = true;
OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
new_state);
}
-void IC::ConfigureVectorState(Handle<Name> name, Handle<HeapType> type,
+void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Code> handler) {
DCHECK(UseVector());
if (kind() == Code::LOAD_IC) {
LoadICNexus* nexus = casted_nexus<LoadICNexus>();
- nexus->ConfigureMonomorphic(type, handler);
+ nexus->ConfigureMonomorphic(map, handler);
} else {
DCHECK(kind() == Code::KEYED_LOAD_IC);
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
- nexus->ConfigureMonomorphic(name, type, handler);
+ nexus->ConfigureMonomorphic(name, map, handler);
}
+ vector_set_ = true;
OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
MONOMORPHIC);
}
-void IC::ConfigureVectorState(Handle<Name> name, TypeHandleList* types,
+void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
CodeHandleList* handlers) {
DCHECK(UseVector());
if (kind() == Code::LOAD_IC) {
LoadICNexus* nexus = casted_nexus<LoadICNexus>();
- nexus->ConfigurePolymorphic(types, handlers);
+ nexus->ConfigurePolymorphic(maps, handlers);
} else {
DCHECK(kind() == Code::KEYED_LOAD_IC);
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
- nexus->ConfigurePolymorphic(name, types, handlers);
+ nexus->ConfigurePolymorphic(name, maps, handlers);
}
+ vector_set_ = true;
OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
POLYMORPHIC);
}
@@ -689,9 +698,9 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) {
if (UseVector()) {
- ConfigureVectorState(GENERIC);
+ ConfigureVectorState(MEGAMORPHIC);
} else {
- set_target(*KeyedLoadIC::generic_stub(isolate()));
+ set_target(*megamorphic_stub());
}
TRACE_IC("LoadIC", name);
TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
@@ -714,13 +723,21 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+ Handle<Object> result =
+ FixedArray::get(ScriptContextTable::GetContext(
+ script_contexts, lookup_result.context_index),
+ lookup_result.slot_index);
+ if (*result == *isolate()->factory()->the_hole_value()) {
+ // Do not install stubs and stay pre-monomorphic for
+ // uninitialized accesses.
+ return ReferenceError("not_defined", name);
+ }
+
if (use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
LoadScriptContextFieldStub stub(isolate(), &lookup_result);
PatchCache(name, stub.GetCode());
}
- return FixedArray::get(ScriptContextTable::GetContext(
- script_contexts, lookup_result.context_index),
- lookup_result.slot_index);
+ return result;
}
}
@@ -764,74 +781,70 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
if (!code->is_handler()) return false;
if (target()->is_keyed_stub() && state() != PROTOTYPE_FAILURE) return false;
- Handle<HeapType> type = receiver_type();
- TypeHandleList types;
+ Handle<Map> map = receiver_map();
+ MapHandleList maps;
CodeHandleList handlers;
- TargetTypes(&types);
- int number_of_types = types.length();
- int deprecated_types = 0;
+ TargetMaps(&maps);
+ int number_of_maps = maps.length();
+ int deprecated_maps = 0;
int handler_to_overwrite = -1;
- for (int i = 0; i < number_of_types; i++) {
- Handle<HeapType> current_type = types.at(i);
- if (current_type->IsClass() &&
- current_type->AsClass()->Map()->is_deprecated()) {
+ for (int i = 0; i < number_of_maps; i++) {
+ Handle<Map> current_map = maps.at(i);
+ if (current_map->is_deprecated()) {
// Filter out deprecated maps to ensure their instances get migrated.
- ++deprecated_types;
- } else if (type->NowIs(current_type)) {
+ ++deprecated_maps;
+ } else if (map.is_identical_to(current_map)) {
// If the receiver type is already in the polymorphic IC, this indicates
// there was a prototoype chain failure. In that case, just overwrite the
// handler.
handler_to_overwrite = i;
- } else if (handler_to_overwrite == -1 && current_type->IsClass() &&
- type->IsClass() &&
- IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(),
- *type->AsClass()->Map())) {
+ } else if (handler_to_overwrite == -1 &&
+ IsTransitionOfMonomorphicTarget(*current_map, *map)) {
handler_to_overwrite = i;
}
}
- int number_of_valid_types =
- number_of_types - deprecated_types - (handler_to_overwrite != -1);
+ int number_of_valid_maps =
+ number_of_maps - deprecated_maps - (handler_to_overwrite != -1);
- if (number_of_valid_types >= 4) return false;
- if (number_of_types == 0 && state() != MONOMORPHIC &&
- state() != POLYMORPHIC) {
+ if (number_of_valid_maps >= 4) return false;
+ if (number_of_maps == 0 && state() != MONOMORPHIC && state() != POLYMORPHIC) {
return false;
}
if (UseVector()) {
- if (!nexus()->FindHandlers(&handlers, types.length())) return false;
+ if (!nexus()->FindHandlers(&handlers, maps.length())) return false;
} else {
- if (!target()->FindHandlers(&handlers, types.length())) return false;
+ if (!target()->FindHandlers(&handlers, maps.length())) return false;
}
- number_of_valid_types++;
- if (number_of_valid_types > 1 && target()->is_keyed_stub()) return false;
+ number_of_valid_maps++;
+ if (number_of_valid_maps > 1 && target()->is_keyed_stub()) return false;
Handle<Code> ic;
- if (number_of_valid_types == 1) {
+ if (number_of_valid_maps == 1) {
if (UseVector()) {
- ConfigureVectorState(name, receiver_type(), code);
+ ConfigureVectorState(name, receiver_map(), code);
} else {
- ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
+ ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, map, code,
extra_ic_state());
}
} else {
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
- if (!type->NowIs(types.at(handler_to_overwrite))) {
- types.Set(handler_to_overwrite, type);
+ if (!map.is_identical_to(maps.at(handler_to_overwrite))) {
+ maps.Set(handler_to_overwrite, map);
}
} else {
- types.Add(type);
+ maps.Add(map);
handlers.Add(code);
}
if (UseVector()) {
- ConfigureVectorState(name, &types, &handlers);
+ ConfigureVectorState(name, &maps, &handlers);
} else {
- ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
- number_of_valid_types, name,
+ ic = PropertyICCompiler::ComputePolymorphic(kind(), &maps, &handlers,
+ number_of_valid_maps, name,
extra_ic_state());
}
}
@@ -841,66 +854,25 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
}
-Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
- return object->IsJSGlobalObject()
- ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
- : HeapType::NowOf(object, isolate);
-}
-
-
-Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) {
- if (type->Is(HeapType::Number()))
- return isolate->factory()->heap_number_map();
- if (type->Is(HeapType::Boolean())) return isolate->factory()->boolean_map();
- if (type->IsConstant()) {
- return handle(
- Handle<JSGlobalObject>::cast(type->AsConstant()->Value())->map());
- }
- DCHECK(type->IsClass());
- return type->AsClass()->Map();
-}
-
-
-template <class T>
-typename T::TypeHandle IC::MapToType(Handle<Map> map,
- typename T::Region* region) {
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- return T::Number(region);
- } else if (map->instance_type() == ODDBALL_TYPE) {
- // The only oddballs that can be recorded in ICs are booleans.
- return T::Boolean(region);
- } else {
- return T::Class(map, region);
- }
-}
-
-
-template Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
-
-
-template Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map,
- Isolate* region);
-
-
void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
DCHECK(handler->is_handler());
if (UseVector()) {
- ConfigureVectorState(name, receiver_type(), handler);
+ ConfigureVectorState(name, receiver_map(), handler);
} else {
Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
- kind(), name, receiver_type(), handler, extra_ic_state());
+ kind(), name, receiver_map(), handler, extra_ic_state());
set_target(*ic);
}
}
void IC::CopyICToMegamorphicCache(Handle<Name> name) {
- TypeHandleList types;
+ MapHandleList maps;
CodeHandleList handlers;
- TargetTypes(&types);
- if (!target()->FindHandlers(&handlers, types.length())) return;
- for (int i = 0; i < types.length(); i++) {
- UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
+ TargetMaps(&maps);
+ if (!target()->FindHandlers(&handlers, maps.length())) return;
+ for (int i = 0; i < maps.length(); i++) {
+ UpdateMegamorphicCache(*maps.at(i), *name, *handlers.at(i));
}
}
@@ -936,26 +908,25 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
CopyICToMegamorphicCache(name);
}
if (UseVector()) {
- ConfigureVectorState(kind() == Code::KEYED_LOAD_IC ? GENERIC
- : MEGAMORPHIC);
+ ConfigureVectorState(MEGAMORPHIC);
} else {
set_target(*megamorphic_stub());
}
// Fall through.
case MEGAMORPHIC:
- UpdateMegamorphicCache(*receiver_type(), *name, *code);
+ UpdateMegamorphicCache(*receiver_map(), *name, *code);
// Indicate that we've handled this case.
- target_set_ = true;
+ if (UseVector()) {
+ vector_set_ = true;
+ } else {
+ target_set_ = true;
+ }
break;
case DEBUG_STUB:
break;
case DEFAULT:
- UNREACHABLE();
- break;
case GENERIC:
- // The generic keyed store stub re-uses store handlers, which can miss.
- // That's ok, no reason to do anything.
- DCHECK(target()->kind() == Code::KEYED_STORE_IC);
+ UNREACHABLE();
break;
}
}
@@ -1003,7 +974,7 @@ Handle<Code> LoadIC::megamorphic_stub() {
return stub.GetCode();
} else {
DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return KeyedLoadIC::generic_stub(isolate());
+ return KeyedLoadIC::ChooseMegamorphicStub(isolate());
}
}
@@ -1056,7 +1027,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
} else if (!lookup->IsFound()) {
if (kind() == Code::LOAD_IC) {
code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
- receiver_type());
+ receiver_map());
// TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
if (code.is_null()) code = slow_stub();
} else {
@@ -1071,10 +1042,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
}
-void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
- // Megamorphic state isn't implemented for keyed loads currently.
- if (kind() == Code::KEYED_LOAD_IC) return;
- Map* map = *TypeToMap(type, isolate());
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
isolate()->stub_cache()->Set(name, map, code);
}
@@ -1084,7 +1052,7 @@ Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
CacheHolderFlag flag;
Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
- *receiver_type(), receiver_is_holder, isolate(), &flag);
+ receiver_map(), receiver_is_holder, isolate(), &flag);
Handle<Code> code = PropertyHandlerCompiler::Find(
lookup->name(), stub_holder_map, kind(), flag,
@@ -1157,14 +1125,13 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
return function_prototype_stub.GetCode();
}
- Handle<HeapType> type = receiver_type();
+ Handle<Map> map = receiver_map();
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
bool receiver_is_holder = receiver.is_identical_to(holder);
switch (lookup->state()) {
case LookupIterator::INTERCEPTOR: {
DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
// Perform a lookup behind the interceptor. Copy the LookupIterator since
// the original iterator will be used to fetch the value.
LookupIterator it = *lookup;
@@ -1179,8 +1146,8 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
DCHECK(receiver->IsJSObject());
Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
int object_offset;
- if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, lookup->name(),
- &object_offset)) {
+ if (Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
+ &object_offset)) {
FieldIndex index =
FieldIndex::ForInObjectOffset(object_offset, js_receiver->map());
return SimpleFieldLoad(index);
@@ -1192,13 +1159,12 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(accessors);
if (v8::ToCData<Address>(info->getter()) == 0) break;
- if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
- type)) {
+ if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+ map)) {
break;
}
if (!holder->HasFastProperties()) break;
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
return compiler.CompileLoadCallback(lookup->name(), info);
}
if (accessors->IsAccessorPair()) {
@@ -1208,23 +1174,23 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
if (!holder->HasFastProperties()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
if (!receiver->IsJSObject() && !function->IsBuiltin() &&
- function->shared()->strict_mode() == SLOPPY) {
+ is_sloppy(function->shared()->language_mode())) {
// Calling sloppy non-builtins with a value as the receiver
// requires boxing.
break;
}
CallOptimization call_optimization(function);
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
if (call_optimization.is_simple_api_call() &&
call_optimization.IsCompatibleReceiver(receiver, holder)) {
- return compiler.CompileLoadCallback(lookup->name(),
- call_optimization);
+ return compiler.CompileLoadCallback(lookup->name(), call_optimization,
+ lookup->GetAccessorIndex());
}
- return compiler.CompileLoadViaGetter(lookup->name(), function);
+ int expected_arguments =
+ function->shared()->internal_formal_parameter_count();
+ return compiler.CompileLoadViaGetter(
+ lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
}
- // TODO(dcarney): Handle correctly.
- DCHECK(accessors->IsDeclaredAccessorInfo());
break;
}
@@ -1232,15 +1198,15 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
if (lookup->is_dictionary_holder()) {
if (kind() != Code::LOAD_IC) break;
if (holder->IsGlobalObject()) {
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder,
cache_holder);
Handle<PropertyCell> cell = lookup->GetPropertyCell();
Handle<Code> code = compiler.CompileLoadGlobal(
cell, lookup->name(), lookup->IsConfigurable());
// TODO(verwaest): Move caching of these NORMAL stubs outside as well.
CacheHolderFlag flag;
- Handle<Map> stub_holder_map = GetHandlerCacheHolder(
- *type, receiver_is_holder, isolate(), &flag);
+ Handle<Map> stub_holder_map =
+ GetHandlerCacheHolder(map, receiver_is_holder, isolate(), &flag);
Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
return code;
}
@@ -1253,24 +1219,22 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
}
// -------------- Fields --------------
- if (lookup->property_details().type() == FIELD) {
+ if (lookup->property_details().type() == DATA) {
FieldIndex field = lookup->GetFieldIndex();
if (receiver_is_holder) {
return SimpleFieldLoad(field);
}
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
return compiler.CompileLoadField(lookup->name(), field);
}
// -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == CONSTANT);
+ DCHECK(lookup->property_details().type() == DATA_CONSTANT);
if (receiver_is_holder) {
LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
return stub.GetCode();
}
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
return compiler.CompileLoadConstant(lookup->name(),
lookup->GetConstantIndex());
}
@@ -1317,7 +1281,7 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
if (FLAG_vector_ics) {
Handle<Code> handler =
PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
- ConfigureVectorState(Handle<Name>::null(), receiver_type(), handler);
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
return null_handle;
}
return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
@@ -1337,7 +1301,7 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
if (FLAG_vector_ics) {
Handle<Code> handler =
PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
- ConfigureVectorState(Handle<Name>::null(), receiver_type(), handler);
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
return null_handle;
}
return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
@@ -1351,33 +1315,22 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
- if (FLAG_vector_ics) {
- ConfigureVectorState(GENERIC);
- return null_handle;
- }
- return generic_stub();
+ return megamorphic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
- if (FLAG_vector_ics) {
- ConfigureVectorState(GENERIC);
- return null_handle;
- }
- return generic_stub();
+ return megamorphic_stub();
}
if (FLAG_vector_ics) {
CodeHandleList handlers(target_receiver_maps.length());
ElementHandlerCompiler compiler(isolate());
compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
- TypeHandleList types(target_receiver_maps.length());
- for (int i = 0; i < target_receiver_maps.length(); i++) {
- types.Add(HeapType::Class(target_receiver_maps.at(i), isolate()));
- }
- ConfigureVectorState(Handle<Name>::null(), &types, &handlers);
+ ConfigureVectorState(Handle<Name>::null(), &target_receiver_maps,
+ &handlers);
return null_handle;
}
@@ -1396,7 +1349,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
}
Handle<Object> load_handle;
- Handle<Code> stub = generic_stub();
+ Handle<Code> stub = megamorphic_stub();
// Check for non-string values that can be converted into an
// internalized string directly or is representable as a smi.
@@ -1415,16 +1368,26 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
}
}
- if (!is_target_set()) {
- if (!FLAG_vector_ics) {
- Code* generic = *generic_stub();
+ if (!UseVector()) {
+ if (!is_target_set()) {
+ Code* generic = *megamorphic_stub();
if (*stub == generic) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
set_target(*stub);
+ TRACE_IC("LoadIC", key);
+ }
+ } else {
+ if (!is_vector_set() || stub.is_null()) {
+ Code* generic = *megamorphic_stub();
+ if (!stub.is_null() && *stub == generic) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+ }
+
+ TRACE_IC("LoadIC", key);
}
- TRACE_IC("LoadIC", key);
}
if (!load_handle.is_null()) return load_handle;
@@ -1473,7 +1436,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
it->PrepareForDataProperty(value);
// The previous receiver map might just have been deprecated,
// so reload it.
- update_receiver_type(receiver);
+ update_receiver_map(receiver);
return true;
}
@@ -1490,6 +1453,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
}
}
+ if (it->IsSpecialNumericIndex()) return false;
it->PrepareTransitionToDataProperty(value, NONE, store_mode);
return it->IsCacheableTransition();
}
@@ -1530,7 +1494,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- Object::SetProperty(object, name, value, strict_mode()), Object);
+ Object::SetProperty(object, name, value, language_mode()), Object);
return result;
}
@@ -1551,7 +1515,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- JSObject::SetElement(receiver, index, value, NONE, strict_mode()),
+ JSObject::SetElement(receiver, index, value, NONE, language_mode()),
Object);
return value;
}
@@ -1562,7 +1526,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- Object::SetProperty(object, name, value, strict_mode(), store_mode),
+ Object::SetProperty(object, name, value, language_mode(), store_mode),
Object);
return result;
}
@@ -1574,13 +1538,21 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- Object::SetProperty(&it, value, strict_mode(), store_mode), Object);
+ Object::SetProperty(&it, value, language_mode(), store_mode), Object);
return result;
}
Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
CallICState::CallType call_type) {
+ CallICTrampolineStub stub(isolate, CallICState(argc, call_type));
+ Handle<Code> code = stub.GetCode();
+ return code;
+}
+
+
+Handle<Code> CallIC::initialize_stub_in_optimized_code(
+ Isolate* isolate, int argc, CallICState::CallType call_type) {
CallICStub stub(isolate, CallICState(argc, call_type));
Handle<Code> code = stub.GetCode();
return code;
@@ -1588,8 +1560,8 @@ Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
- StrictMode strict_mode) {
- ExtraICState extra_state = ComputeExtraICState(strict_mode);
+ LanguageMode language_mode) {
+ ExtraICState extra_state = ComputeExtraICState(language_mode);
Handle<Code> ic =
PropertyICCompiler::ComputeStore(isolate, UNINITIALIZED, extra_state);
return ic;
@@ -1602,7 +1574,7 @@ Handle<Code> StoreIC::megamorphic_stub() {
extra_ic_state());
} else {
DCHECK(kind() == Code::KEYED_STORE_IC);
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
return isolate()->builtins()->KeyedStoreIC_Megamorphic_Strict();
} else {
return isolate()->builtins()->KeyedStoreIC_Megamorphic();
@@ -1611,21 +1583,6 @@ Handle<Code> StoreIC::megamorphic_stub() {
}
-Handle<Code> StoreIC::generic_stub() const {
- if (kind() == Code::STORE_IC) {
- return PropertyICCompiler::ComputeStore(isolate(), GENERIC,
- extra_ic_state());
- } else {
- DCHECK(kind() == Code::KEYED_STORE_IC);
- if (strict_mode() == STRICT) {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- }
-}
-
-
Handle<Code> StoreIC::slow_stub() const {
if (kind() == Code::STORE_IC) {
return isolate()->builtins()->StoreIC_Slow();
@@ -1637,8 +1594,8 @@ Handle<Code> StoreIC::slow_stub() const {
Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
- StrictMode strict_mode) {
- ExtraICState state = ComputeExtraICState(strict_mode);
+ LanguageMode language_mode) {
+ ExtraICState state = ComputeExtraICState(language_mode);
return PropertyICCompiler::ComputeStore(isolate, PREMONOMORPHIC, state);
}
@@ -1664,6 +1621,19 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
}
+static Handle<Code> PropertyCellStoreHandler(
+ Isolate* isolate, Handle<JSObject> receiver, Handle<GlobalObject> holder,
+ Handle<Name> name, Handle<PropertyCell> cell, Handle<Object> value) {
+ auto union_type = PropertyCell::UpdatedType(cell, value);
+ StoreGlobalStub stub(isolate, union_type->IsConstant(),
+ receiver->IsJSGlobalProxy());
+ auto code = stub.GetCodeCopyFromTemplate(holder, cell);
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+}
+
+
Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
Handle<Object> value,
CacheHolderFlag cache_holder) {
@@ -1676,6 +1646,13 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
+ auto store_target = lookup->GetStoreTarget();
+ if (store_target->IsGlobalObject()) {
+ auto cell = lookup->GetTransitionPropertyCell();
+ return PropertyCellStoreHandler(
+ isolate(), store_target, Handle<GlobalObject>::cast(store_target),
+ lookup->name(), cell, value);
+ }
Handle<Map> transition = lookup->transition_map();
// Currently not handled by CompileStoreTransition.
if (!holder->HasFastProperties()) {
@@ -1684,13 +1661,13 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
}
DCHECK(lookup->IsCacheableTransition());
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
return compiler.CompileStoreTransition(transition, lookup->name());
}
case LookupIterator::INTERCEPTOR: {
DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
return compiler.CompileStoreInterceptor(lookup->name());
}
@@ -1707,12 +1684,12 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
break;
}
- if (!ExecutableAccessorInfo::IsCompatibleReceiverType(
- isolate(), info, receiver_type())) {
+ if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+ receiver_map())) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
break;
}
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
return compiler.CompileStoreCallback(receiver, lookup->name(), info);
} else if (accessors->IsAccessorPair()) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
@@ -1723,40 +1700,38 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
}
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
if (call_optimization.is_simple_api_call() &&
call_optimization.IsCompatibleReceiver(receiver, holder)) {
return compiler.CompileStoreCallback(receiver, lookup->name(),
- call_optimization);
+ call_optimization,
+ lookup->GetAccessorIndex());
}
+ int expected_arguments =
+ function->shared()->internal_formal_parameter_count();
return compiler.CompileStoreViaSetter(receiver, lookup->name(),
- Handle<JSFunction>::cast(setter));
+ lookup->GetAccessorIndex(),
+ expected_arguments);
}
- // TODO(dcarney): Handle correctly.
- DCHECK(accessors->IsDeclaredAccessorInfo());
- TRACE_GENERIC_IC(isolate(), "StoreIC", "declared accessor info");
break;
}
case LookupIterator::DATA: {
if (lookup->is_dictionary_holder()) {
if (holder->IsGlobalObject()) {
- Handle<PropertyCell> cell = lookup->GetPropertyCell();
- Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(isolate(), union_type->IsConstant(),
- receiver->IsJSGlobalProxy());
- Handle<Code> code = stub.GetCodeCopyFromTemplate(
- Handle<GlobalObject>::cast(holder), cell);
- // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
- HeapObject::UpdateMapCodeCache(receiver, lookup->name(), code);
- return code;
+ DCHECK(holder.is_identical_to(receiver) ||
+ receiver->map()->prototype() == *holder);
+ auto cell = lookup->GetPropertyCell();
+ return PropertyCellStoreHandler(isolate(), receiver,
+ Handle<GlobalObject>::cast(holder),
+ lookup->name(), cell, value);
}
DCHECK(holder.is_identical_to(receiver));
return isolate()->builtins()->StoreIC_Normal();
}
// -------------- Fields --------------
- if (lookup->property_details().type() == FIELD) {
+ if (lookup->property_details().type() == DATA) {
bool use_stub = true;
if (lookup->representation().IsHeapObject()) {
// Only use a generic stub if no types need to be tracked.
@@ -1769,12 +1744,12 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
lookup->representation());
return stub.GetCode();
}
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
return compiler.CompileStoreField(lookup);
}
// -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == CONSTANT);
+ DCHECK(lookup->property_details().type() == DATA_CONSTANT);
TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
break;
}
@@ -1790,12 +1765,13 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
KeyedAccessStoreMode store_mode) {
- // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+ // Don't handle megamorphic property accesses for INTERCEPTORS or
+ // ACCESSOR_CONSTANT
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != Code::NORMAL) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-NORMAL target type");
- return generic_stub();
+ return megamorphic_stub();
}
Handle<Map> receiver_map(receiver->map(), isolate());
@@ -1806,7 +1782,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- monomorphic_map, strict_mode(), store_mode);
+ monomorphic_map, language_mode(), store_mode);
}
// There are several special cases where an IC that is MONOMORPHIC can still
@@ -1831,7 +1807,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- transitioned_receiver_map, strict_mode(), store_mode);
+ transitioned_receiver_map, language_mode(), store_mode);
} else if (*previous_receiver_map == receiver->map() &&
old_store_mode == STANDARD_STORE &&
(store_mode == STORE_AND_GROW_NO_TRANSITION ||
@@ -1841,7 +1817,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- receiver_map, strict_mode(), store_mode);
+ receiver_map, language_mode(), store_mode);
}
}
@@ -1859,9 +1835,9 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
- // won't help, use the generic stub.
+ // won't help, use the megamorphic stub which can handle everything.
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
- return generic_stub();
+ return megamorphic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the
@@ -1871,20 +1847,20 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
}
// Make sure all polymorphic handlers have the same store mode, otherwise the
- // generic stub must be used.
+ // megamorphic stub must be used.
store_mode = GetNonTransitioningStoreMode(store_mode);
if (old_store_mode != STANDARD_STORE) {
if (store_mode == STANDARD_STORE) {
store_mode = old_store_mode;
} else if (store_mode != old_store_mode) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
- return generic_stub();
+ return megamorphic_stub();
}
}
// If the store mode isn't the standard mode, make sure that all polymorphic
// receivers are either external arrays, or all "normal" arrays. Otherwise,
- // use the generic stub.
+ // use the megamorphic stub.
if (store_mode != STANDARD_STORE) {
int external_arrays = 0;
for (int i = 0; i < target_receiver_maps.length(); ++i) {
@@ -1897,12 +1873,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
external_arrays != target_receiver_maps.length()) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
"unsupported combination of external and normal arrays");
- return generic_stub();
+ return megamorphic_stub();
}
}
return PropertyICCompiler::ComputeKeyedStorePolymorphic(
- &target_receiver_maps, store_mode, strict_mode());
+ &target_receiver_maps, store_mode, language_mode());
}
@@ -2032,7 +2008,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result, Runtime::SetObjectProperty(isolate(), object, key,
- value, strict_mode()),
+ value, language_mode()),
Object);
return result;
}
@@ -2042,12 +2018,12 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
key = TryConvertKey(key, isolate());
Handle<Object> store_handle;
- Handle<Code> stub = generic_stub();
+ Handle<Code> stub = megamorphic_stub();
- if (key->IsInternalizedString()) {
+ if (key->IsInternalizedString() || key->IsSymbol()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), store_handle,
- StoreIC::Store(object, Handle<String>::cast(key), value,
+ StoreIC::Store(object, Handle<Name>::cast(key), value,
JSReceiver::MAY_BE_STORE_FROM_KEYED),
Object);
if (!is_target_set()) {
@@ -2082,7 +2058,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
if (receiver->elements()->map() ==
isolate()->heap()->sloppy_arguments_elements_map()) {
- if (strict_mode() == SLOPPY) {
+ if (is_sloppy(language_mode())) {
stub = sloppy_arguments_stub();
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
@@ -2111,13 +2087,13 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), store_handle,
Runtime::SetObjectProperty(isolate(), object, key, value,
- strict_mode()),
+ language_mode()),
Object);
}
DCHECK(!is_target_set());
- Code* generic = *generic_stub();
- if (*stub == generic) {
+ Code* megamorphic = *megamorphic_stub();
+ if (*stub == megamorphic) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
if (*stub == *slow_stub()) {
@@ -2131,14 +2107,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
-// static
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictMode strict_mode) {
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
-}
-
-
-bool CallIC::DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+bool CallIC::DoCustomHandler(Handle<Object> function,
const CallICState& callic_state) {
DCHECK(FLAG_use_ic && function->IsJSFunction());
@@ -2150,8 +2119,16 @@ bool CallIC::DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
CallICNexus* nexus = casted_nexus<CallICNexus>();
nexus->ConfigureMonomorphicArray();
- CallIC_ArrayStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
+ // Vector-based ICs have a different calling convention in optimized code
+ // than full code so the correct stub has to be chosen.
+ if (AddressIsOptimizedCode()) {
+ CallIC_ArrayStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ } else {
+ CallIC_ArrayTrampolineStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ }
+
Handle<String> name;
if (array_function->shared()->name()->IsString()) {
name = Handle<String>(String::cast(array_function->shared()->name()),
@@ -2173,9 +2150,15 @@ void CallIC::PatchMegamorphic(Handle<Object> function) {
CallICNexus* nexus = casted_nexus<CallICNexus>();
nexus->ConfigureGeneric();
- CallICStub stub(isolate(), callic_state);
- Handle<Code> code = stub.GetCode();
- set_target(*code);
+ // Vector-based ICs have a different calling convention in optimized code
+ // than full code so the correct stub has to be chosen.
+ if (AddressIsOptimizedCode()) {
+ CallICStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ } else {
+ CallICTrampolineStub stub(isolate(), callic_state);
+ set_target(*stub.GetCode());
+ }
Handle<Object> name = isolate()->factory()->empty_string();
if (function->IsJSFunction()) {
@@ -2189,7 +2172,7 @@ void CallIC::PatchMegamorphic(Handle<Object> function) {
}
-void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function) {
+void CallIC::HandleMiss(Handle<Object> function) {
CallICState callic_state(target()->extra_ic_state());
Handle<Object> name = isolate()->factory()->empty_string();
CallICNexus* nexus = casted_nexus<CallICNexus>();
@@ -2198,7 +2181,7 @@ void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function) {
// Hand-coded MISS handling is easier if CallIC slots don't contain smis.
DCHECK(!feedback->IsSmi());
- if (feedback->IsJSFunction() || !function->IsJSFunction()) {
+ if (feedback->IsWeakCell() || !function->IsJSFunction()) {
// We are going generic.
nexus->ConfigureGeneric();
} else {
@@ -2212,7 +2195,7 @@ void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function) {
feedback->IsAllocationSite());
// Do we want to install a custom handler?
- if (FLAG_use_ic && DoCustomHandler(receiver, function, callic_state)) {
+ if (FLAG_use_ic && DoCustomHandler(function, callic_state)) {
return;
}
@@ -2241,15 +2224,14 @@ void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function) {
RUNTIME_FUNCTION(CallIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> function = args.at<Object>(1);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
- Handle<Smi> slot = args.at<Smi>(3);
+ DCHECK(args.length() == 3);
+ Handle<Object> function = args.at<Object>(0);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
+ Handle<Smi> slot = args.at<Smi>(2);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
CallICNexus nexus(vector, vector_slot);
CallIC ic(isolate, &nexus);
- ic.HandleMiss(receiver, function);
+ ic.HandleMiss(function);
return *function;
}
@@ -2257,10 +2239,10 @@ RUNTIME_FUNCTION(CallIC_Miss) {
RUNTIME_FUNCTION(CallIC_Customization_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- Handle<Object> function = args.at<Object>(1);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
- Handle<Smi> slot = args.at<Smi>(3);
+ DCHECK(args.length() == 3);
+ Handle<Object> function = args.at<Object>(0);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
+ Handle<Smi> slot = args.at<Smi>(2);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
CallICNexus nexus(vector, vector_slot);
// A miss on a custom call ic always results in going megamorphic.
@@ -2434,11 +2416,11 @@ RUNTIME_FUNCTION(StoreIC_Slow) {
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictMode strict_mode = ic.strict_mode();
+ LanguageMode language_mode = ic.language_mode();
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
return *result;
}
@@ -2450,11 +2432,11 @@ RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictMode strict_mode = ic.strict_mode();
+ LanguageMode language_mode = ic.language_mode();
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
return *result;
}
@@ -2468,7 +2450,7 @@ RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
Handle<Map> map = args.at<Map>(1);
Handle<Object> key = args.at<Object>(2);
Handle<Object> object = args.at<Object>(3);
- StrictMode strict_mode = ic.strict_mode();
+ LanguageMode language_mode = ic.language_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
@@ -2476,7 +2458,7 @@ RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
return *result;
}
@@ -2522,7 +2504,7 @@ MaybeHandle<Object> BinaryOpIC::Transition(
target = stub.GetCode();
// Sanity check the generic stub.
- DCHECK_EQ(NULL, target->FindFirstAllocationSite());
+ DCHECK_NULL(target->FindFirstAllocationSite());
}
set_target(*target);
@@ -2780,11 +2762,17 @@ RUNTIME_FUNCTION(ToBooleanIC_Miss) {
RUNTIME_FUNCTION(StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
- Handle<ExecutableAccessorInfo> callback = args.at<ExecutableAccessorInfo>(2);
+ Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
Handle<Name> name = args.at<Name>(3);
Handle<Object> value = args.at<Object>(4);
HandleScope scope(isolate);
+ Handle<ExecutableAccessorInfo> callback(
+ callback_or_cell->IsWeakCell()
+ ? ExecutableAccessorInfo::cast(
+ WeakCell::cast(*callback_or_cell)->value())
+ : ExecutableAccessorInfo::cast(*callback_or_cell));
+
DCHECK(callback->IsCompatibleReceiver(*receiver));
Address setter_address = v8::ToCData<Address>(callback->setter());
@@ -2812,8 +2800,12 @@ RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
- NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
+ Handle<JSObject> receiver =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
+ HandleScope scope(isolate);
+ Handle<InterceptorInfo> interceptor_info(holder->GetNamedInterceptor());
if (name->IsSymbol() && !interceptor_info->can_intercept_symbols())
return isolate->heap()->no_interceptor_result_sentinel();
@@ -2823,15 +2815,10 @@ RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
FUNCTION_CAST<v8::GenericNamedPropertyGetterCallback>(getter_address);
DCHECK(getter != NULL);
- Handle<JSObject> receiver =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
- Handle<JSObject> holder =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
PropertyCallbackArguments callback_args(isolate, interceptor_info->data(),
*receiver, *holder);
{
// Use the interceptor getter.
- HandleScope scope(isolate);
v8::Handle<v8::Value> r =
callback_args.Call(getter, v8::Utils::ToLocal(name));
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
@@ -2911,7 +2898,7 @@ RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- JSObject::SetProperty(receiver, name, value, ic.strict_mode()));
+ JSObject::SetProperty(receiver, name, value, ic.language_mode()));
return *result;
}
@@ -2924,7 +2911,7 @@ RUNTIME_FUNCTION(LoadElementWithInterceptor) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- JSObject::GetElementWithInterceptor(receiver, receiver, index));
+ JSObject::GetElementWithInterceptor(receiver, receiver, index, true));
return *result;
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 541fa0c7dc..8c1c82eac7 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -92,14 +92,14 @@ class IC {
bool IsCallStub() const { return target()->is_call_stub(); }
#endif
- template <class TypeClass>
- static JSFunction* GetRootConstructor(TypeClass* type,
- Context* native_context);
- static inline Handle<Map> GetHandlerCacheHolder(HeapType* type,
+ static inline JSFunction* GetRootConstructor(Map* receiver_map,
+ Context* native_context);
+ static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder,
Isolate* isolate,
CacheHolderFlag* flag);
- static inline Handle<Map> GetICCacheHolder(HeapType* type, Isolate* isolate,
+ static inline Handle<Map> GetICCacheHolder(Handle<Map> receiver_map,
+ Isolate* isolate,
CacheHolderFlag* flag);
static bool IsCleared(Code* code) {
@@ -112,19 +112,6 @@ class IC {
return state == UNINITIALIZED || state == PREMONOMORPHIC;
}
- // Utility functions to convert maps to types and back. There are two special
- // cases:
- // - The heap_number_map is used as a marker which includes heap numbers as
- // well as smis.
- // - The oddball map is only used for booleans.
- static Handle<Map> TypeToMap(HeapType* type, Isolate* isolate);
- template <class T>
- static typename T::TypeHandle MapToType(Handle<Map> map,
- typename T::Region* region);
-
- static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
- Isolate* isolate);
-
static bool ICUseVector(Code::Kind kind) {
return (FLAG_vector_ics &&
(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
@@ -146,9 +133,12 @@ class IC {
// Get the original (non-breakpointed) code object of the caller.
Code* GetOriginalCode() const;
+ bool AddressIsOptimizedCode() const;
+
// Set the call-site target.
inline void set_target(Code* code);
bool is_target_set() { return target_set_; }
+ bool is_vector_set() { return vector_set_; }
bool UseVector() const {
bool use = ICUseVector(kind());
@@ -160,10 +150,10 @@ class IC {
// Configure for most states.
void ConfigureVectorState(IC::State new_state);
// Configure the vector for MONOMORPHIC.
- void ConfigureVectorState(Handle<Name> name, Handle<HeapType> type,
+ void ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Code> handler);
// Configure the vector for POLYMORPHIC.
- void ConfigureVectorState(Handle<Name> name, TypeHandleList* types,
+ void ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
CodeHandleList* handlers);
char TransitionMarkFromState(IC::State state);
@@ -201,7 +191,7 @@ class IC {
void UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name);
bool UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code);
- void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
+ void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
void CopyICToMegamorphicCache(Handle<Name> name);
bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
@@ -224,9 +214,13 @@ class IC {
ExtraICState extra_ic_state() const { return extra_ic_state_; }
void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; }
- Handle<HeapType> receiver_type() { return receiver_type_; }
- void update_receiver_type(Handle<Object> receiver) {
- receiver_type_ = CurrentTypeOf(receiver, isolate_);
+ Handle<Map> receiver_map() { return receiver_map_; }
+ void update_receiver_map(Handle<Object> receiver) {
+ if (receiver->IsSmi()) {
+ receiver_map_ = isolate_->factory()->heap_number_map();
+ } else {
+ receiver_map_ = handle(HeapObject::cast(*receiver)->map());
+ }
}
void TargetMaps(MapHandleList* list) {
@@ -236,13 +230,6 @@ class IC {
}
}
- void TargetTypes(TypeHandleList* list) {
- FindTargetMaps();
- for (int i = 0; i < target_maps_.length(); i++) {
- list->Add(MapToType<HeapType>(target_maps_.at(i), isolate_));
- }
- }
-
Map* FirstTargetMap() {
FindTargetMaps();
return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
@@ -302,10 +289,11 @@ class IC {
// The original code target that missed.
Handle<Code> target_;
bool target_set_;
+ bool vector_set_;
State old_state_; // For saving if we marked as prototype failure.
State state_;
Code::Kind kind_;
- Handle<HeapType> receiver_type_;
+ Handle<Map> receiver_map_;
MaybeHandle<Code> maybe_handler_;
ExtraICState extra_ic_state_;
@@ -344,15 +332,17 @@ class CallIC : public IC {
void PatchMegamorphic(Handle<Object> function);
- void HandleMiss(Handle<Object> receiver, Handle<Object> function);
+ void HandleMiss(Handle<Object> function);
// Returns true if a custom handler was installed.
- bool DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+ bool DoCustomHandler(Handle<Object> function,
const CallICState& callic_state);
// Code generator routines.
static Handle<Code> initialize_stub(Isolate* isolate, int argc,
CallICState::CallType call_type);
+ static Handle<Code> initialize_stub_in_optimized_code(
+ Isolate* isolate, int argc, CallICState::CallType call_type);
static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
};
@@ -481,7 +471,7 @@ class KeyedLoadIC : public LoadIC {
static void GeneratePreMonomorphic(MacroAssembler* masm) {
GenerateMiss(masm);
}
- static void GenerateGeneric(MacroAssembler* masm);
+ static void GenerateMegamorphic(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -492,7 +482,7 @@ class KeyedLoadIC : public LoadIC {
static Handle<Code> initialize_stub(Isolate* isolate);
static Handle<Code> initialize_stub_in_optimized_code(Isolate* isolate);
- static Handle<Code> generic_stub(Isolate* isolate);
+ static Handle<Code> ChooseMegamorphicStub(Isolate* isolate);
static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
@@ -505,8 +495,6 @@ class KeyedLoadIC : public LoadIC {
}
private:
- Handle<Code> generic_stub() const { return generic_stub(isolate()); }
-
static void Clear(Isolate* isolate, Address address, Code* target,
ConstantPoolArray* constant_pool);
@@ -516,24 +504,26 @@ class KeyedLoadIC : public LoadIC {
class StoreIC : public IC {
public:
- class StrictModeState : public BitField<StrictMode, 1, 1> {};
- static ExtraICState ComputeExtraICState(StrictMode flag) {
- return StrictModeState::encode(flag);
+ STATIC_ASSERT(i::LANGUAGE_END == 3);
+ class LanguageModeState : public BitField<LanguageMode, 1, 2> {};
+ static ExtraICState ComputeExtraICState(LanguageMode flag) {
+ return LanguageModeState::encode(flag);
}
- static StrictMode GetStrictMode(ExtraICState state) {
- return StrictModeState::decode(state);
+ static LanguageMode GetLanguageMode(ExtraICState state) {
+ return LanguageModeState::decode(state);
}
// For convenience, a statically declared encoding of strict mode extra
// IC state.
- static const ExtraICState kStrictModeState = 1 << StrictModeState::kShift;
+ static const ExtraICState kStrictModeState = STRICT
+ << LanguageModeState::kShift;
StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
DCHECK(IsStoreStub());
}
- StrictMode strict_mode() const {
- return StrictModeState::decode(extra_ic_state());
+ LanguageMode language_mode() const {
+ return LanguageModeState::decode(extra_ic_state());
}
// Code generators for stub routines. Only called once at startup.
@@ -546,9 +536,10 @@ class StoreIC : public IC {
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode);
+ LanguageMode language_mode);
- static Handle<Code> initialize_stub(Isolate* isolate, StrictMode strict_mode);
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ LanguageMode language_mode);
MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -559,19 +550,16 @@ class StoreIC : public IC {
JSReceiver::StoreFromKeyed store_mode);
protected:
- Handle<Code> megamorphic_stub() OVERRIDE;
-
// Stub accessors.
- Handle<Code> generic_stub() const;
-
+ Handle<Code> megamorphic_stub() OVERRIDE;
Handle<Code> slow_stub() const;
virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate(), strict_mode());
+ return pre_monomorphic_stub(isolate(), language_mode());
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictMode strict_mode);
+ LanguageMode language_mode);
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -601,14 +589,16 @@ class KeyedStoreIC : public StoreIC {
public:
// ExtraICState bits (building on IC)
// ExtraICState bits
+ // When more language modes are added, these BitFields need to move too.
+ STATIC_ASSERT(i::LANGUAGE_END == 3);
class ExtraICStateKeyedAccessStoreMode
- : public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
+ : public BitField<KeyedAccessStoreMode, 3, 4> {}; // NOLINT
- class IcCheckTypeField : public BitField<IcCheckType, 6, 1> {};
+ class IcCheckTypeField : public BitField<IcCheckType, 7, 1> {};
- static ExtraICState ComputeExtraICState(StrictMode flag,
+ static ExtraICState ComputeExtraICState(LanguageMode flag,
KeyedAccessStoreMode mode) {
- return StrictModeState::encode(flag) |
+ return LanguageModeState::encode(flag) |
ExtraICStateKeyedAccessStoreMode::encode(mode) |
IcCheckTypeField::encode(ELEMENT);
}
@@ -637,17 +627,17 @@ class KeyedStoreIC : public StoreIC {
}
static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm, StrictMode strict_mode);
- static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
+ static void GenerateMegamorphic(MacroAssembler* masm,
+ LanguageMode language_mode);
static void GenerateSloppyArguments(MacroAssembler* masm);
protected:
virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate(), strict_mode());
+ return pre_monomorphic_stub(isolate(), language_mode());
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictMode strict_mode) {
- if (strict_mode == STRICT) {
+ LanguageMode language_mode) {
+ if (is_strict(language_mode)) {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
} else {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 75032e1915..93106ea0e1 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -17,8 +17,8 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
@@ -27,18 +27,21 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ lw(receiver,
+ __ lw(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -53,8 +56,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- ra : return address
// -----------------------------------
@@ -64,18 +67,22 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Save value register, so we can restore it later.
__ push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ lw(receiver,
+ __ lw(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver, value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -188,7 +195,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- __ li(scratch, Operand(cell));
+ Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
__ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
@@ -199,16 +207,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ li(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name, receiver, holder);
}
@@ -222,39 +224,48 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch_in));
DCHECK(!receiver.is(scratch_in));
- // Preparing to push, adjust sp.
- __ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
- __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
+ __ push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ sw(arg, MemOperand(sp, (argc - 1 - i) * kPointerSize)); // Push arg.
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch_in.is(store_parameter));
+ __ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
Register callee = a0;
- Register call_data = t0;
+ Register data = t0;
Register holder = a2;
Register api_function_address = a1;
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ li(holder, api_holder);
+ __ lw(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lw(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ lw(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ lw(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -262,23 +273,17 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ li(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ li(call_data, api_call_info);
- __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ li(call_data, call_data_obj);
+ __ lw(data, FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(data, FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ lw(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
@@ -288,7 +293,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
@@ -370,21 +375,24 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
- __ lw(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ lw(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
- Handle<Map> current;
while (true) {
- // Do the CompareMap() directly within the Branch() functions.
- current = it.Current();
+ // Compare map directly within the Branch() functions.
+ __ GetWeakValue(scratch, Map::WeakCellForMap(it.Current()));
it.Advance();
if (it.Done()) {
- __ Branch(miss_label, ne, scratch1(), Operand(current));
+ __ Branch(miss_label, ne, map_reg, Operand(scratch));
break;
}
- __ Branch(&do_store, eq, scratch1(), Operand(current));
+ __ Branch(&do_store, eq, map_reg, Operand(scratch));
}
__ bind(&do_store);
}
@@ -395,7 +403,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -407,8 +415,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
@@ -544,12 +552,15 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
DCHECK(!scratch3().is(reg));
DCHECK(!scratch4().is(reg));
__ push(receiver());
- if (heap()->InNewSpace(callback->data())) {
- __ li(scratch3(), callback);
- __ lw(scratch3(),
- FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ li(scratch3(), data);
} else {
- __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch3(), cell);
}
__ Subu(sp, sp, 6 * kPointerSize);
__ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
@@ -656,7 +667,14 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
- __ li(at, Operand(callback)); // Callback info.
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ li(at, Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ li(at, Operand(cell));
+ }
__ push(at);
__ li(at, Operand(name));
__ Push(at, value());
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
index 6169404c61..179531235d 100644
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -15,7 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -42,7 +42,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Label number_case;
Register match = scratch2();
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
// Polymorphic keyed stores may use the map register
@@ -50,12 +50,11 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match.
@@ -63,7 +62,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ GetWeakValue(match, cell);
__ Subu(match, match, Operand(map_reg));
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
@@ -121,12 +120,12 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister());
- __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ li(a0, Operand(Smi::FromInt(language_mode)));
__ Push(a0);
// Do tail-call to runtime routine.
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 7c8a5eacab..33b78cec58 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -160,12 +160,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* not_fast_array,
- Label* out_of_range) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -174,8 +172,6 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
- // elements - holds the elements of the receiver on exit.
- //
// result - holds the result on exit if the load succeeded.
// Allowed to be the the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
@@ -183,25 +179,50 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// Scratch registers:
//
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
+ // elements - holds the elements of the receiver and its prototypes.
+ //
+ // scratch1 - used to hold elements length, bit fields, base addresses.
//
- // scratch2 - used to hold the loaded value.
+ // scratch2 - used to hold maps, prototypes, and the loaded value.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode (not dictionary).
- __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(not_fast_array, ne, scratch1, Operand(at));
- } else {
- __ AssertFastElements(elements);
- }
+ __ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(out_of_range, hs, key, Operand(scratch1));
+ __ Branch(&in_bounds, lo, key, Operand(scratch1));
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ // Negative keys can't take the fast OOB path.
+ __ Branch(slow, lt, key, Operand(zero_reg));
+ __ bind(&check_prototypes);
+ __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(&return_undefined, eq, scratch2, Operand(at));
+ __ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+ __ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch2: map of current prototype
+ __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+ __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
+ __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+ __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ Branch(slow, ne, at, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ Branch(slow, ne, elements, Operand(at));
+ __ Branch(&check_next_prototype);
+ __ bind(&return_undefined);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
__ Addu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -212,10 +233,10 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ lw(scratch2, MemOperand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ Branch(out_of_range, eq, scratch2, Operand(at));
- __ mov(result, scratch2);
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ Branch(&check_prototypes, eq, scratch2, Operand(at));
+ __ Move(result, scratch2);
+ __ bind(&done);
}
@@ -454,7 +475,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -478,7 +499,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
__ Ret();
@@ -508,92 +529,33 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Map::kHasNamedInterceptor, &slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&probe_dictionary, eq, t0, Operand(at));
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ sra(a3, a0, KeyedLookupCache::kMapHashShift);
- __ lw(t0, FieldMemOperand(key, Name::kHashFieldOffset));
- __ sra(at, t0, Name::kHashShift);
- __ xor_(a3, a3, at);
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(a3, a3, Operand(mask));
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
- __ li(t0, Operand(cache_keys));
- __ sll(at, a3, kPointerSizeLog2 + 1);
- __ addu(t0, t0, at);
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
- __ Branch(&try_next_entry, ne, a0, Operand(t1));
- __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
- __ Branch(&hit_on_nth_entry[i], eq, key, Operand(t1));
- __ bind(&try_next_entry);
- }
-
- __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ Branch(&slow, ne, a0, Operand(t1));
- __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
- __ Branch(&slow, ne, key, Operand(t1));
-
- // Get field offset.
- // a0 : receiver's map
- // a3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ li(t0, Operand(cache_field_offsets));
- __ sll(at, a3, kPointerSizeLog2);
- __ addu(at, t0, at);
- __ lw(t1, MemOperand(at, kPointerSize * i));
- __ lbu(t2, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
- __ Subu(t1, t1, t2);
- __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
- if (i != 0) {
- __ Branch(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(int_slot)));
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset));
- __ addu(t2, t2, t1); // Index from start of object.
- __ Subu(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ sll(at, t2, kPointerSizeLog2);
- __ addu(at, receiver, at);
- __ lw(v0, MemOperand(at));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- t0, a3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
- __ sll(v0, t1, kPointerSizeLog2);
- __ Addu(v0, v0, receiver);
- __ lw(v0, MemOperand(v0));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- t0, a3);
- __ Ret();
-
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::LOAD_IC, flags, false, receiver, key, t0, t1, t2, t5);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
@@ -762,7 +724,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -813,7 +775,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// a0: value.
// a1: key.
// a2: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
@@ -1013,10 +975,10 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
- DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+ DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
patcher.masm()->andi(at, reg, kSmiTagMask);
} else {
- DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+ DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
patcher.masm()->andi(at, reg, 0);
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index d3b861bc25..a68a418fa2 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -17,8 +17,8 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
@@ -27,18 +27,21 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ ld(receiver,
+ __ ld(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -53,8 +56,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- ra : return address
// -----------------------------------
@@ -64,18 +67,22 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Save value register, so we can restore it later.
__ push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ ld(receiver,
+ __ ld(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver, value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -189,7 +196,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- __ li(scratch, Operand(cell));
+ Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
__ ld(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
@@ -200,16 +208,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ li(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+ __ Push(name, receiver, holder);
}
@@ -223,39 +225,48 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch_in));
DCHECK(!receiver.is(scratch_in));
- // Preparing to push, adjust sp.
- __ Dsubu(sp, sp, Operand((argc + 1) * kPointerSize));
- __ sd(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
+ __ push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ sd(arg, MemOperand(sp, (argc - 1 - i) * kPointerSize)); // Push arg.
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch_in.is(store_parameter));
+ __ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
Register callee = a0;
- Register call_data = a4;
+ Register data = a4;
Register holder = a2;
Register api_function_address = a1;
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ li(holder, api_holder);
+ __ ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -263,23 +274,17 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ li(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ li(call_data, api_call_info);
- __ ld(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ li(call_data, call_data_obj);
+ __ ld(data, FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(data, FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
@@ -289,7 +294,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
@@ -371,21 +376,24 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
- __ ld(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ ld(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
- Handle<Map> current;
while (true) {
- // Do the CompareMap() directly within the Branch() functions.
- current = it.Current();
+ // Compare map directly within the Branch() functions.
+ __ GetWeakValue(scratch, Map::WeakCellForMap(it.Current()));
it.Advance();
if (it.Done()) {
- __ Branch(miss_label, ne, scratch1(), Operand(current));
+ __ Branch(miss_label, ne, map_reg, Operand(scratch));
break;
}
- __ Branch(&do_store, eq, scratch1(), Operand(current));
+ __ Branch(&do_store, eq, map_reg, Operand(scratch));
}
__ bind(&do_store);
}
@@ -396,7 +404,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -408,8 +416,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
@@ -545,12 +553,15 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
DCHECK(!scratch3().is(reg));
DCHECK(!scratch4().is(reg));
__ push(receiver());
- if (heap()->InNewSpace(callback->data())) {
- __ li(scratch3(), callback);
- __ ld(scratch3(),
- FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ li(scratch3(), data);
} else {
- __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch3(), cell);
}
__ Dsubu(sp, sp, 6 * kPointerSize);
__ sd(scratch3(), MemOperand(sp, 5 * kPointerSize));
@@ -657,7 +668,14 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
- __ li(at, Operand(callback)); // Callback info.
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ li(at, Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ li(at, Operand(cell));
+ }
__ push(at);
__ li(at, Operand(name));
__ Push(at, value());
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
index 7ed4492ee3..a3dcdf7207 100644
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -15,7 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -42,7 +42,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Label number_case;
Register match = scratch2();
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
// Polymorphic keyed stores may use the map register
@@ -50,12 +50,11 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
__ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match.
@@ -63,7 +62,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ GetWeakValue(match, cell);
__ Dsubu(match, match, Operand(map_reg));
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
@@ -121,12 +120,12 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister());
- __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ li(a0, Operand(Smi::FromInt(language_mode)));
__ Push(a0);
// Do tail-call to runtime routine.
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 7ac191c395..7b22b8c3e2 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -159,12 +159,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* not_fast_array,
- Label* out_of_range) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -173,8 +171,6 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
- // elements - holds the elements of the receiver on exit.
- //
// result - holds the result on exit if the load succeeded.
// Allowed to be the the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
@@ -182,25 +178,50 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// Scratch registers:
//
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
+ // elements - holds the elements of the receiver and its prototypes.
+ //
+ // scratch1 - used to hold elements length, bit fields, base addresses.
//
- // scratch2 - used to hold the loaded value.
+ // scratch2 - used to hold maps, prototypes, and the loaded value.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode (not dictionary).
- __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(not_fast_array, ne, scratch1, Operand(at));
- } else {
- __ AssertFastElements(elements);
- }
+ __ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(out_of_range, hs, key, Operand(scratch1));
+ __ Branch(&in_bounds, lo, key, Operand(scratch1));
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ // Negative keys can't take the fast OOB path.
+ __ Branch(slow, lt, key, Operand(zero_reg));
+ __ bind(&check_prototypes);
+ __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(&return_undefined, eq, scratch2, Operand(at));
+ __ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+ __ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch2: map of current prototype
+ __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+ __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
+ __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+ __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ Branch(slow, ne, at, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ Branch(slow, ne, elements, Operand(at));
+ __ Branch(&check_next_prototype);
+ __ bind(&return_undefined);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
__ Daddu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -211,10 +232,10 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ ld(scratch2, MemOperand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ Branch(out_of_range, eq, scratch2, Operand(at));
- __ mov(result, scratch2);
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ Branch(&check_prototypes, eq, scratch2, Operand(at));
+ __ Move(result, scratch2);
+ __ bind(&done);
}
@@ -452,7 +473,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -476,7 +497,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
__ Ret();
@@ -506,99 +527,33 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Map::kHasNamedInterceptor, &slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&probe_dictionary, eq, a4, Operand(at));
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ dsll32(a3, a0, 0);
- __ dsrl32(a3, a3, 0);
- __ dsra(a3, a3, KeyedLookupCache::kMapHashShift);
- __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset));
- __ dsra(at, a4, Name::kHashShift);
- __ xor_(a3, a3, at);
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(a3, a3, Operand(mask));
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
- __ li(a4, Operand(cache_keys));
- __ dsll(at, a3, kPointerSizeLog2 + 1);
- __ daddu(a4, a4, at);
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ ld(a5, MemOperand(a4, kPointerSize * i * 2));
- __ Branch(&try_next_entry, ne, a0, Operand(a5));
- __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1)));
- __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5));
- __ bind(&try_next_entry);
- }
-
- __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ Branch(&slow, ne, a0, Operand(a5));
- __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
- __ Branch(&slow, ne, key, Operand(a5));
-
- // Get field offset.
- // a0 : receiver's map
- // a3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ li(a4, Operand(cache_field_offsets));
-
- // TODO(yy) This data structure does NOT follow natural pointer size.
- __ dsll(at, a3, kPointerSizeLog2 - 1);
- __ daddu(at, a4, at);
- __ lwu(a5, MemOperand(at, kPointerSize / 2 * i));
-
- __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
- __ Dsubu(a5, a5, a6);
- __ Branch(&property_array_property, ge, a5, Operand(zero_reg));
- if (i != 0) {
- __ Branch(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(int_slot)));
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset));
- // Index from start of object.
- __ daddu(a6, a6, a5);
- // Remove the heap tag.
- __ Dsubu(receiver, receiver, Operand(kHeapObjectTag));
- __ dsll(at, a6, kPointerSizeLog2);
- __ daddu(at, receiver, at);
- __ ld(v0, MemOperand(at));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- a4, a3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
- __ dsll(v0, a5, kPointerSizeLog2);
- __ Daddu(v0, v0, a1);
- __ ld(v0, MemOperand(v0));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- a4, a3);
- __ Ret();
-
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::LOAD_IC, flags, false, receiver, key, a4, a5, a6, t1);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
@@ -771,7 +726,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -822,7 +777,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// a0: value.
// a1: key.
// a2: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
@@ -1020,10 +975,10 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
- DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+ DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
patcher.masm()->andi(at, reg, kSmiTagMask);
} else {
- DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+ DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
patcher.masm()->andi(at, reg, 0);
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 2f29c83412..4283d39f22 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -18,7 +18,8 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ Register holder, int accessor_index, int expected_arguments,
+ Register scratch) {
// ----------- S t a t e -------------
// -- r3 : receiver
// -- r5 : name
@@ -27,18 +28,21 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ LoadP(receiver,
+ __ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -54,7 +58,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ Register holder, int accessor_index, int expected_arguments,
+ Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
@@ -64,18 +69,22 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Save value register, so we can restore it later.
__ push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ LoadP(receiver,
+ __ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver, value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -92,6 +101,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ addi(sp, sp, Operand(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -140,27 +169,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
+ MacroAssembler* masm, int index, Register result, Label* miss) {
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ LoadP(scratch, MemOperand(cp, offset));
- __ LoadP(scratch,
- FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ LoadP(scratch, MemOperand(scratch, Context::SlotOffset(index)));
- __ Move(ip, function);
- __ cmp(ip, scratch);
- __ bne(miss);
-
+ __ LoadP(result, MemOperand(cp, offset));
+ __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ __ LoadP(result, MemOperand(result, Context::SlotOffset(index)));
// Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
+ __ LoadP(result,
+ FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ LoadP(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
}
@@ -181,7 +199,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
+ Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
__ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
@@ -193,16 +212,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(interceptor));
- __ push(scratch);
__ push(receiver);
__ push(holder);
}
@@ -218,37 +231,48 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch_in));
DCHECK(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ push(arg);
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch_in.is(store_parameter));
+ __ push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
Register callee = r3;
- Register call_data = r7;
+ Register data = r7;
Register holder = r5;
Register api_function_address = r4;
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ Move(holder, api_holder);
+ __ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -256,24 +280,20 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ Move(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ Move(call_data, api_call_info);
- __ LoadP(call_data,
- FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(call_data, call_data_obj);
+ __ LoadP(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
@@ -284,7 +304,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
@@ -328,18 +348,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
- Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ mov(this->name(), Operand(name));
- __ mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
}
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register scratch,
+ Label* miss) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+ Register map_reg = StoreTransitionDescriptor::MapRegister();
+ DCHECK(!map_reg.is(scratch));
+ __ LoadWeakValue(map_reg, cell, miss);
+ if (transition->CanBeDeprecated()) {
+ __ lwz(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ DecodeField<Map::Deprecated>(r0, scratch, SetRC);
+ __ bne(miss, cr0);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+ int descriptor,
Register value_reg,
+ Register scratch,
Label* miss_label) {
- __ Move(scratch1(), handle(constant, isolate()));
- __ cmp(value_reg, scratch1());
+ DCHECK(!map_reg.is(scratch));
+ DCHECK(!map_reg.is(value_reg));
+ DCHECK(!value_reg.is(scratch));
+ __ LoadInstanceDescriptors(map_reg, scratch);
+ __ LoadP(scratch, FieldMemOperand(
+ scratch, DescriptorArray::GetValueOffset(descriptor)));
+ __ cmp(value_reg, scratch);
__ bne(miss_label);
}
@@ -347,13 +387,17 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
- __ LoadP(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
Label do_store;
while (true) {
- __ CompareMap(scratch1(), it.Current(), &do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
it.Advance();
if (it.Done()) {
__ bne(miss_label);
@@ -418,11 +462,11 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ LoadP(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
+ __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- } else {
- __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ bne(miss);
}
// Check access rights to the global object. This has to happen after
@@ -440,17 +484,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
- if (load_prototype_from_map) {
- __ LoadP(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ mov(reg, Operand(prototype));
- }
+ __ LoadP(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
}
// Go to the next object in the prototype chain.
@@ -463,7 +497,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ bne(miss);
}
// Perform security check for access to the global object.
@@ -483,6 +520,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -522,12 +563,16 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
DCHECK(!scratch3().is(reg));
DCHECK(!scratch4().is(reg));
__ push(receiver());
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3(), callback);
- __ LoadP(scratch3(),
- FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ // Push data from ExecutableAccessorInfo.
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ Move(scratch3(), data);
} else {
- __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch3(), cell);
}
__ push(scratch3());
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
@@ -580,6 +625,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
@@ -597,6 +643,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
@@ -624,12 +671,11 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Handle<JSObject> object, Handle<Name> name, int accessor_index) {
+ Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // receiver
- __ mov(ip, Operand(callback)); // callback info
+ __ LoadSmiLiteral(ip, Smi::FromInt(accessor_index));
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
@@ -666,11 +712,15 @@ Register NamedStoreHandlerCompiler::value() {
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
- __ mov(result, Operand(cell));
+ Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+ __ LoadWeakValue(result, weak_cell, &miss);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
@@ -682,6 +732,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r4, r6);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret();
FrontendFooter(name, &miss);
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
index c86845646e..820ab929ca 100644
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -15,13 +15,11 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
+ __ mov(r0, Operand(Smi::FromInt(language_mode)));
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r0);
+ StoreDescriptor::ValueRegister(), r0);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 4, 1);
@@ -41,9 +39,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -72,8 +73,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
- __ mov(ip, Operand(map));
- __ cmp(map_reg, ip);
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
@@ -100,16 +101,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
- __ LoadP(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ Register map_reg = scratch1();
+ __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(scratch1(), ip);
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+ __ CmpWeakValue(map_reg, cell, scratch2());
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
} else {
Label next_map;
__ bne(&next_map);
- __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+ Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+ __ LoadWeakValue(transition_map(), cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 97c3e2fcc9..d0a2177f20 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -164,12 +164,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* not_fast_array,
- Label* out_of_range) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -178,8 +176,6 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
- // elements - holds the elements of the receiver on exit.
- //
// result - holds the result on exit if the load succeeded.
// Allowed to be the the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
@@ -187,37 +183,62 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// Scratch registers:
//
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
+ // elements - holds the elements of the receiver and its protoypes.
+ //
+ // scratch1 - used to hold elements length, bit fields, base addresses.
//
- // scratch2 - used to hold the loaded value.
+ // scratch2 - used to hold maps, prototypes, and the loaded value.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ LoadP(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ bne(not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
+ __ AssertFastElements(elements);
+
// Check that the key (index) is within bounds.
__ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmpl(key, scratch1);
- __ bge(out_of_range);
+ __ blt(&in_bounds);
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ cmpi(key, Operand::Zero());
+ __ blt(slow); // Negative keys can't take the fast OOB path.
+ __ bind(&check_prototypes);
+ __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+ __ beq(&return_undefined);
+ __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+ __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch2: map of current prototype
+ __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
+ __ blt(slow);
+ __ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+ __ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ bne(slow, cr0);
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ bne(slow);
+ __ jmp(&check_next_prototype);
+
+ __ bind(&return_undefined);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
__ addi(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
__ SmiToPtrArrayOffset(scratch2, key);
__ LoadPX(scratch2, MemOperand(scratch2, scratch1));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ beq(out_of_range);
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ beq(&check_prototypes);
__ mr(result, scratch2);
+ __ bind(&done);
}
@@ -275,18 +296,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
static const Register LoadIC_TempRegister() { return r6; }
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ if (FLAG_vector_ics) {
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+
+ __ Push(receiver, name, slot, vector);
+ } else {
+ __ Push(receiver, name);
+ }
+}
+
+
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r6, r7);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r7, r8, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, r7, r8);
- __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -415,15 +453,18 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r6, r7);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r7, r8, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r7, r8);
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+ LoadIC_PushArgs(masm);
// Perform tail call to the entry.
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
+ int arg_count = FLAG_vector_ics ? 4 : 2;
+ __ TailCallExternalReference(ref, arg_count, 1);
}
@@ -436,7 +477,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -460,7 +501,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r3, r6, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
__ Ret();
@@ -490,106 +531,35 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
Map::kHasNamedInterceptor, &slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r7, ip);
__ beq(&probe_dictionary);
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ srawi(r6, r3, KeyedLookupCache::kMapHashShift);
- __ lwz(r7, FieldMemOperand(key, Name::kHashFieldOffset));
- __ srawi(r7, r7, Name::kHashShift);
- __ xor_(r6, r6, r7);
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ mov(r7, Operand(mask));
- __ and_(r6, r6, r7, LeaveRC);
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ mov(r7, Operand(cache_keys));
- __ mr(r0, r5);
- __ ShiftLeftImm(r5, r6, Operand(kPointerSizeLog2 + 1));
- __ add(r7, r7, r5);
- __ mr(r5, r0);
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and move r7 to next entry.
- __ LoadP(r8, MemOperand(r7));
- __ addi(r7, r7, Operand(kPointerSize * 2));
- __ cmp(r3, r8);
- __ bne(&try_next_entry);
- __ LoadP(r8, MemOperand(r7, -kPointerSize)); // Load name
- __ cmp(key, r8);
- __ beq(&hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- // Last entry: Load map and move r7 to name.
- __ LoadP(r8, MemOperand(r7));
- __ addi(r7, r7, Operand(kPointerSize));
- __ cmp(r3, r8);
- __ bne(&slow);
- __ LoadP(r8, MemOperand(r7));
- __ cmp(key, r8);
- __ bne(&slow);
- // Get field offset.
- // r3 : receiver's map
- // r6 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ mov(r7, Operand(cache_field_offsets));
- if (i != 0) {
- __ addi(r6, r6, Operand(i));
- }
- __ ShiftLeftImm(r8, r6, Operand(2));
- __ lwzx(r8, MemOperand(r8, r7));
- __ lbz(r9, FieldMemOperand(r3, Map::kInObjectPropertiesOffset));
- __ sub(r8, r8, r9);
- __ cmpi(r8, Operand::Zero());
- __ bge(&property_array_property);
- if (i != 0) {
- __ b(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(int_slot));
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ lbz(r9, FieldMemOperand(r3, Map::kInstanceSizeOffset));
- __ add(r9, r9, r8); // Index from start of object.
- __ subi(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ShiftLeftImm(r3, r9, Operand(kPointerSizeLog2));
- __ LoadPX(r3, MemOperand(r3, receiver));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- r7, r6);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ LoadP(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ addi(receiver, receiver,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ShiftLeftImm(r3, r8, Operand(kPointerSizeLog2));
- __ LoadPX(r3, MemOperand(r3, receiver));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
- r7, r6);
- __ Ret();
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::KEYED_LOAD_IC, flags, false, receiver, key, r7, r8, r9, r10);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
@@ -770,7 +740,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// ---------- S t a t e --------------
// -- r3 : value
// -- r4 : key
@@ -826,7 +796,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// r3: value.
// r4: key.
// r5: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
@@ -835,8 +805,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(r7, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, r6, r7, r8, r9);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, r6, r7, r8, r9);
// Cache miss.
__ b(&miss);
@@ -897,8 +867,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, r6, r7, r8, r9);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, r6, r7, r8, r9);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
index 816a2ae649..50f1f99850 100644
--- a/deps/v8/src/ic/ppc/stub-cache-ppc.cc
+++ b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -48,8 +50,14 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Calculate the base address of the entry.
__ mov(base_addr, Operand(key_offset));
- __ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2));
- __ add(base_addr, base_addr, scratch2);
+#if V8_TARGET_ARCH_PPC64
+ DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
+ __ ShiftLeftImm(offset_scratch, offset_scratch,
+ Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
+#else
+ DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
+#endif
+ __ add(base_addr, base_addr, offset_scratch);
// Check that the key in the entry matches the name.
__ LoadP(ip, MemOperand(base_addr, 0));
@@ -99,10 +107,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -120,15 +129,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check scratch, extra and extra2 registers are valid.
DCHECK(!scratch.is(no_reg));
@@ -136,6 +137,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
@@ -147,34 +159,24 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, ip);
-#if V8_TARGET_ARCH_PPC64
- // Use only the low 32 bits of the map pointer.
- __ rldicl(scratch, scratch, 0, 32);
-#endif
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift));
- // Mask down the eor argument to the minimum to keep the immediate
- // encodable.
- __ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ andi(scratch, scratch, Operand(mask));
+ __ xori(scratch, scratch, Operand(flags));
+ // The mask omits the last two bits because they are not part of the hash.
+ __ andi(scratch, scratch,
+ Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
- __ ShiftRightImm(extra, name, Operand(kCacheIndexShift));
- __ sub(scratch, scratch, extra);
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ andi(scratch, scratch, Operand(mask2));
+ __ sub(scratch, scratch, name);
+ __ addi(scratch, scratch, Operand(flags));
+ __ andi(scratch, scratch,
+ Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 46fa8cc337..485d87dc57 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -104,15 +104,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ Push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- __ Move(kScratchRegister, interceptor);
- __ Push(kScratchRegister);
__ Push(receiver);
__ Push(holder);
}
@@ -128,42 +123,53 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ Handle<Map> receiver_map, Register receiver, Register scratch,
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch));
DCHECK(optimization.is_simple_api_call());
- __ PopReturnAddressTo(scratch_in);
+ __ PopReturnAddressTo(scratch);
// receiver
__ Push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ Push(arg);
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch.is(store_parameter));
+ __ Push(store_parameter);
}
- __ PushReturnAddressFrom(scratch_in);
+ __ PushReturnAddressFrom(scratch);
// Stack now matches JSFunction abi.
// Abi for CallApiFunctionStub.
- Register callee = rax;
- Register call_data = rbx;
+ Register callee = rdi;
+ Register data = rbx;
Register holder = rcx;
Register api_function_address = rdx;
- Register scratch = rdi; // scratch_in is no longer valid.
+ scratch = no_reg;
+
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ Move(holder, api_holder);
+ __ movp(holder, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(holder, FieldOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ movp(holder, FieldOperand(holder, HeapObject::kMapOffset));
+ __ movp(holder, FieldOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -171,23 +177,17 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ Move(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ Move(scratch, api_call_info);
- __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(call_data, call_data_obj);
+ __ movp(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ movp(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ movp(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
@@ -196,7 +196,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
@@ -206,16 +206,17 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- __ Move(scratch, cell);
- __ Cmp(FieldOperand(scratch, Cell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
+ Factory* factory = masm->isolate()->factory();
+ Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
+ __ Cmp(FieldOperand(scratch, Cell::kValueOffset), factory->the_hole_value());
__ j(not_equal, miss);
}
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -----------------------------------
@@ -225,18 +226,23 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Save value register, so we can restore it later.
__ Push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ movp(receiver,
+ __ movp(scratch,
FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver);
__ Push(value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
@@ -255,8 +261,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -265,17 +271,21 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ movp(receiver,
+ __ movp(scratch,
FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ Push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
@@ -377,12 +387,17 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
+ __ movp(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
while (true) {
- __ CompareMap(value_reg, it.Current());
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
@@ -399,7 +414,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -413,8 +428,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
@@ -541,13 +556,17 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
__ Push(receiver()); // receiver
- if (heap()->InNewSpace(callback->data())) {
- DCHECK(!scratch2().is(reg));
- __ Move(scratch2(), callback);
- __ Push(FieldOperand(scratch2(),
- ExecutableAccessorInfo::kDataOffset)); // data
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ Push(data);
} else {
- __ Push(Handle<Object>(callback->data(), isolate()));
+ DCHECK(!scratch2().is(reg));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch2(), cell);
+ __ Push(scratch2());
}
DCHECK(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
@@ -664,7 +683,14 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ PopReturnAddressTo(scratch1());
__ Push(receiver());
__ Push(holder_reg);
- __ Push(callback); // callback info
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ Push(callback);
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ Push(cell);
+ }
__ Push(name);
__ Push(value());
__ PushReturnAddressFrom(scratch1());
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
index 5be9c465cc..89e7aee7ff 100644
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -15,8 +15,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
// Return address is on the stack.
DCHECK(!rbx.is(StoreDescriptor::ReceiverRegister()) &&
!rbx.is(StoreDescriptor::NameRegister()) &&
@@ -26,7 +26,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
__ Push(StoreDescriptor::ReceiverRegister());
__ Push(StoreDescriptor::NameRegister());
__ Push(StoreDescriptor::ValueRegister());
- __ Push(Smi::FromInt(strict_mode));
+ __ Push(Smi::FromInt(language_mode));
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -72,7 +72,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
}
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -99,7 +99,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
}
Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
@@ -107,17 +107,16 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
__ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
// Check map and tail call if there's a match
__ CmpWeakValue(map_reg, cell, scratch2());
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 2709a85718..c183febbb8 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -168,11 +168,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch, Register result,
- Label* not_fast_array, Label* out_of_range) {
+ Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -181,8 +180,6 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
- // elements - holds the elements of the receiver on exit.
- //
// result - holds the result on exit if the load succeeded.
// Allowed to be the the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
@@ -190,32 +187,58 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
//
// Scratch registers:
//
- // scratch - used to hold elements of the receiver and the loaded value.
+ // elements - holds the elements of the receiver and its prototypes.
+ //
+ // scratch - used to hold maps, prototypes, and the loaded value.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
+ __ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
// Unsigned comparison rejects negative indices.
- __ j(above_equal, out_of_range);
+ __ j(below, &in_bounds);
+
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ SmiCompare(key, Smi::FromInt(0));
+ __ j(less, slow); // Negative keys can't take the fast OOB path.
+ __ bind(&check_prototypes);
+ __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
+ // scratch: current prototype
+ __ CompareRoot(scratch, Heap::kNullValueRootIndex);
+ __ j(equal, &return_undefined);
+ __ movp(elements, FieldOperand(scratch, JSObject::kElementsOffset));
+ __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ // elements: elements of current prototype
+ // scratch: map of current prototype
+ __ CmpInstanceType(scratch, JS_OBJECT_TYPE);
+ __ j(below, slow);
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ j(not_zero, slow);
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ j(not_equal, slow);
+ __ jmp(&check_next_prototype);
+
+ __ bind(&return_undefined);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
__ movp(scratch, FieldOperand(elements, index.reg, index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ movp(result, scratch);
- }
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ j(equal, &check_prototypes);
+ __ Move(result, scratch);
+ __ bind(&done);
}
@@ -251,7 +274,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -273,7 +296,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(rax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
@@ -302,86 +325,35 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
&slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary leaving result in key.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movl(rax, rbx);
- __ shrl(rax, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rdi, FieldOperand(key, String::kHashFieldOffset));
- __ shrl(rdi, Immediate(String::kHashShift));
- __ xorp(rax, rdi);
- int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ andp(rax, Immediate(mask));
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ movp(rdi, rax);
- __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
- __ LoadAddress(kScratchRegister, cache_keys);
- int off = kPointerSize * i * 2;
- __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &try_next_entry);
- __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &slow);
- __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(not_equal, &slow);
-
- // Get field offset, which is a 32-bit integer.
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ addl(rax, Immediate(i));
- }
- __ LoadAddress(kScratchRegister, cache_field_offsets);
- __ movl(rdi, Operand(kScratchRegister, rax, times_4, 0));
- __ movzxbp(rax, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subp(rdi, rax);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
+ Register megamorphic_scratch = rdi;
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ masm->isolate()->factory()->keyed_load_dummy_vector());
+ int int_slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ Move(vector, dummy_vector);
+ __ Move(slot, Smi::FromInt(int_slot));
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzxbp(rax, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addp(rax, rdi);
- __ movp(rax, FieldOperand(receiver, rax, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ movp(rax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ movp(rax,
- FieldOperand(rax, rdi, times_pointer_size, FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+ false, receiver, key,
+ megamorphic_scratch, no_reg);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
@@ -540,7 +512,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// Return address is on the stack.
Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -580,7 +552,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ bind(&slow);
__ Integer32ToSmi(key, key);
__ bind(&slow_with_tagged_index);
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
index f15635c6b9..4be0d5b330 100644
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -30,7 +30,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
: kPointerSizeLog2 == StubCache::kCacheIndexShift);
ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
- DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
+ DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 2ff35954c8..2eb10c3a3b 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -17,22 +17,26 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- if (!getter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ mov(receiver,
+ __ mov(scratch,
FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
+ __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
@@ -137,42 +141,53 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should will be removed
// when api call ICs are generated in hydrogen.
-void PropertyHandlerCompiler::GenerateFastApiCall(
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
+ Handle<Map> receiver_map, Register receiver, Register scratch,
+ bool is_store, Register store_parameter, Register accessor_holder,
+ int accessor_index) {
+ DCHECK(!accessor_holder.is(scratch));
// Copy return value.
- __ pop(scratch_in);
+ __ pop(scratch);
// receiver
__ push(receiver);
// Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ push(arg);
+ if (is_store) {
+ DCHECK(!receiver.is(store_parameter));
+ DCHECK(!scratch.is(store_parameter));
+ __ push(store_parameter);
}
- __ push(scratch_in);
+ __ push(scratch);
// Stack now matches JSFunction abi.
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
- Register callee = eax;
- Register call_data = ebx;
+ Register callee = edi;
+ Register data = ebx;
Register holder = ecx;
Register api_function_address = edx;
- Register scratch = edi; // scratch_in is no longer valid.
+ scratch = no_reg;
+
+ // Put callee in place.
+ __ LoadAccessor(callee, accessor_holder, accessor_index,
+ is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ int holder_depth = 0;
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+ &holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ LoadHeapObject(holder, api_holder);
+ __ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
+ for (int i = 1; i < holder_depth; i++) {
+ __ mov(holder, FieldOperand(holder, HeapObject::kMapOffset));
+ __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
+ }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -180,23 +195,17 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
}
Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadHeapObject(callee, function);
-
bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ mov(scratch, api_call_info);
- __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
+ // Put call data in place.
+ if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
- __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+ __ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
- __ mov(call_data, call_data_obj);
+ __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
@@ -204,7 +213,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall(
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
__ TailCallStub(&stub);
}
@@ -217,21 +226,18 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Register scratch, Label* miss) {
Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
- Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (masm->serializer_enabled()) {
- __ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(the_hole));
- } else {
- __ cmp(Operand::ForCell(cell), Immediate(the_hole));
- }
+ Factory* factory = masm->isolate()->factory();
+ Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+ __ LoadWeakValue(scratch, weak_cell, miss);
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(factory->the_hole_value()));
__ j(not_equal, miss);
}
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -----------------------------------
@@ -241,18 +247,22 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Save value register, so we can restore it later.
__ push(value());
- if (!setter.is_null()) {
+ if (accessor_index >= 0) {
+ DCHECK(!holder.is(scratch));
+ DCHECK(!receiver.is(scratch));
+ DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
+ if (map->IsJSGlobalObjectMap()) {
+ __ mov(scratch,
FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ receiver = scratch;
}
__ push(receiver);
__ push(value());
ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ ParameterCount expected(expected_arguments);
+ __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
+ __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
@@ -274,16 +284,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(interceptor));
- __ push(scratch);
__ push(receiver);
__ push(holder);
}
@@ -385,12 +389,17 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
Register value_reg,
Label* miss_label) {
+ Register map_reg = scratch1();
+ Register scratch = scratch2();
+ DCHECK(!value_reg.is(map_reg));
+ DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
HeapType::Iterator<Map> it = field_type->Classes();
if (!it.Done()) {
Label do_store;
+ __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
while (true) {
- __ CompareMap(value_reg, it.Current());
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
it.Advance();
if (it.Done()) {
__ j(not_equal, miss_label);
@@ -407,7 +416,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -419,8 +428,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant())
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -547,12 +557,17 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
__ push(receiver()); // receiver
// Push data from ExecutableAccessorInfo.
- if (isolate()->heap()->InNewSpace(callback->data())) {
- DCHECK(!scratch2().is(reg));
- __ mov(scratch2(), Immediate(callback));
- __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
+ Handle<Object> data(callback->data(), isolate());
+ if (data->IsUndefined() || data->IsSmi()) {
+ __ push(Immediate(data));
} else {
- __ push(Immediate(Handle<Object>(callback->data(), isolate())));
+ DCHECK(!scratch2().is(reg));
+ Handle<WeakCell> cell =
+ isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ __ GetWeakValue(scratch2(), cell);
+ __ push(scratch2());
}
__ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
// ReturnValue default value
@@ -677,7 +692,14 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ pop(scratch1()); // remove the return address
__ push(receiver());
__ push(holder_reg);
- __ Push(callback);
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ Push(callback);
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ Push(cell);
+ }
__ Push(name);
__ push(value());
__ push(scratch1()); // restore return address
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index 89bd937480..160e9e9c67 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -15,8 +15,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+ MacroAssembler* masm, LanguageMode language_mode) {
// Return address is on the stack.
DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
!ebx.is(StoreDescriptor::NameRegister()) &&
@@ -25,7 +25,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
__ push(StoreDescriptor::ReceiverRegister());
__ push(StoreDescriptor::NameRegister());
__ push(StoreDescriptor::ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(Immediate(Smi::FromInt(language_mode)));
__ push(ebx); // return address
// Do tail-call to runtime routine.
@@ -36,7 +36,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
#undef __
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -63,7 +63,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
}
Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
@@ -71,16 +71,15 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 1004ac036b..92005bd097 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -169,42 +169,65 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
- Register result, Label* not_fast_array,
- Label* out_of_range) {
+ Register scratch2, Register result,
+ Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
// Scratch registers:
// scratch - used to hold elements of the receiver and the loaded value.
+ // scratch2 - holds maps and prototypes during prototype chain check.
// result - holds the result on exit if the load succeeds and
// we fall through.
+ Label check_prototypes, check_next_prototype;
+ Label done, in_bounds, return_undefined;
__ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CheckMap(scratch,
- masm->isolate()->factory()->fixed_array_map(),
- not_fast_array,
- DONT_DO_SMI_CHECK);
- } else {
- __ AssertFastElements(scratch);
- }
+ __ AssertFastElements(scratch);
+
// Check that the key (index) is within bounds.
__ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(above_equal, out_of_range);
+ __ j(below, &in_bounds);
+ // Out-of-bounds. Check the prototype chain to see if we can just return
+ // 'undefined'.
+ __ cmp(key, 0);
+ __ j(less, slow); // Negative keys can't take the fast OOB path.
+ __ bind(&check_prototypes);
+ __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&check_next_prototype);
+ __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
+ // scratch2: current prototype
+ __ cmp(scratch2, masm->isolate()->factory()->null_value());
+ __ j(equal, &return_undefined);
+ __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
+ __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
+ // scratch: elements of current prototype
+ // scratch2: map of current prototype
+ __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
+ __ j(below, slow);
+ __ test_b(
+ FieldOperand(scratch2, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, slow);
+ __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
+ __ j(not_equal, slow);
+ __ jmp(&check_next_prototype);
+
+ __ bind(&return_undefined);
+ __ mov(result, masm->isolate()->factory()->undefined_value());
+ __ jmp(&done);
+
+ __ bind(&in_bounds);
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ mov(result, scratch);
- }
+ // In case the loaded value is the_hole we have to check the prototype chain.
+ __ j(equal, &check_prototypes);
+ __ Move(result, scratch);
+ __ bind(&done);
}
@@ -307,7 +330,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -329,7 +352,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow);
+ GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
@@ -369,95 +392,36 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
&slow);
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
+ // If the receiver is a fast-case object, check the stub cache. Otherwise
+ // probe the dictionary.
__ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
- // The receiver's map is still in eax, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- if (FLAG_debug_code) {
- __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset));
- __ Check(equal, kMapIsNoLongerInEax);
- }
- __ mov(ebx, eax); // Keep the map around for later.
- __ shr(eax, KeyedLookupCache::kMapHashShift);
- __ mov(edi, FieldOperand(key, String::kHashFieldOffset));
- __ shr(edi, String::kHashShift);
- __ xor_(eax, edi);
- __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ mov(edi, eax);
- __ shl(edi, kPointerSizeLog2 + 1);
- if (i != 0) {
- __ add(edi, Immediate(kPointerSize * i * 2));
- }
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &try_next_entry);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
+ if (FLAG_vector_ics) {
+ // When vector ics are in use, the handlers in the stub cache expect a
+ // vector and slot. Since we won't change the IC from any downstream
+ // misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
+ isolate->factory()->keyed_load_dummy_vector());
+ int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
}
- __ lea(edi, Operand(eax, 1));
- __ shl(edi, kPointerSizeLog2 + 1);
- __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+ false, receiver, key, ebx, edi);
- // Get field offset.
- // ebx : receiver's map
- // eax : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ add(eax, Immediate(i));
- }
- __ mov(edi,
- Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
- __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, eax);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
+ if (FLAG_vector_ics) {
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ __ pop(VectorLoadICDescriptor::SlotRegister());
}
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, edi);
- __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(eax,
- FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
+ // Cache miss.
+ GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
@@ -648,7 +612,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
// Return address is on the stack.
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
@@ -685,7 +649,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+ PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 5d02e84ebd..51fb9204f2 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -28,8 +28,10 @@ class PlatformInterfaceDescriptor;
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
+ V(CreateWeakCell) \
V(CallFunction) \
V(CallFunctionWithFeedback) \
+ V(CallFunctionWithFeedbackAndVector) \
V(CallConstruct) \
V(RegExpConstructResult) \
V(TransitionElementsKind) \
@@ -47,8 +49,9 @@ class PlatformInterfaceDescriptor;
V(Named) \
V(CallHandler) \
V(ArgumentAdaptor) \
- V(ApiGetter) \
V(ApiFunction) \
+ V(ApiAccessor) \
+ V(ApiGetter) \
V(ArgumentsAccessRead) \
V(StoreArrayLiteralElement) \
V(MathPowTagged) \
@@ -316,6 +319,19 @@ class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
};
+class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
+ public:
+ enum ParameterIndices {
+ kVectorIndex,
+ kSlotIndex,
+ kValueIndex,
+ kParameterCount
+ };
+
+ DECLARE_DESCRIPTOR(CreateWeakCellDescriptor, CallInterfaceDescriptor)
+};
+
+
class CallFunctionDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
@@ -329,6 +345,14 @@ class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
};
+class CallFunctionWithFeedbackAndVectorDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CallFunctionWithFeedbackAndVectorDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
class CallConstructDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
@@ -443,6 +467,12 @@ class ApiFunctionDescriptor : public CallInterfaceDescriptor {
};
+class ApiAccessorDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ApiAccessorDescriptor, CallInterfaceDescriptor)
+};
+
+
class ApiGetterDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
diff --git a/deps/v8/src/interface.cc b/deps/v8/src/interface.cc
deleted file mode 100644
index a45804cf52..0000000000
--- a/deps/v8/src/interface.cc
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/interface.h"
-
-#include "src/base/lazy-instance.h"
-
-namespace v8 {
-namespace internal {
-
-// ---------------------------------------------------------------------------
-// Initialization.
-
-struct Interface::Cache {
- template<int flags>
- struct Create {
- static void Construct(Interface* ptr) { ::new (ptr) Interface(flags); }
- };
- typedef Create<VALUE + FROZEN> ValueCreate;
- typedef Create<VALUE + CONST + FROZEN> ConstCreate;
-
- static base::LazyInstance<Interface, ValueCreate>::type value_interface;
- static base::LazyInstance<Interface, ConstCreate>::type const_interface;
-};
-
-
-base::LazyInstance<Interface, Interface::Cache::ValueCreate>::type
- Interface::Cache::value_interface = LAZY_INSTANCE_INITIALIZER;
-
-base::LazyInstance<Interface, Interface::Cache::ConstCreate>::type
- Interface::Cache::const_interface = LAZY_INSTANCE_INITIALIZER;
-
-
-Interface* Interface::NewValue() {
- return Cache::value_interface.Pointer(); // Cached.
-}
-
-
-Interface* Interface::NewConst() {
- return Cache::const_interface.Pointer(); // Cached.
-}
-
-
-// ---------------------------------------------------------------------------
-// Lookup.
-
-Interface* Interface::Lookup(Handle<String> name, Zone* zone) {
- DCHECK(IsModule());
- ZoneHashMap* map = Chase()->exports_;
- if (map == nullptr) return nullptr;
- ZoneAllocationPolicy allocator(zone);
- ZoneHashMap::Entry* p =
- map->Lookup(name.location(), name->Hash(), false, allocator);
- if (p == nullptr) return nullptr;
- DCHECK(*static_cast<String**>(p->key) == *name);
- DCHECK(p->value != nullptr);
- return static_cast<Interface*>(p->value);
-}
-
-
-// ---------------------------------------------------------------------------
-// Addition.
-
-#ifdef DEBUG
-// Current nesting depth for debug output.
-class Nesting {
- public:
- Nesting() { current_ += 2; }
- ~Nesting() { current_ -= 2; }
- static int current() { return current_; }
- private:
- static int current_;
-};
-
-int Nesting::current_ = 0;
-#endif
-
-
-void Interface::DoAdd(const void* name, uint32_t hash, Interface* interface,
- Zone* zone, bool* ok) {
- MakeModule(ok);
- if (!*ok) return;
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*s# Adding...\n", Nesting::current(), "");
- PrintF("%*sthis = ", Nesting::current(), "");
- this->Print(Nesting::current());
- const AstRawString* raw = static_cast<const AstRawString*>(name);
- PrintF("%*s%.*s : ", Nesting::current(), "",
- raw->length(), raw->raw_data());
- interface->Print(Nesting::current());
- }
-#endif
-
- ZoneHashMap** map = &Chase()->exports_;
- ZoneAllocationPolicy allocator(zone);
-
- if (*map == nullptr) {
- *map = new(zone->New(sizeof(ZoneHashMap)))
- ZoneHashMap(ZoneHashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity, allocator);
- }
-
- ZoneHashMap::Entry* p =
- (*map)->Lookup(const_cast<void*>(name), hash, !IsFrozen(), allocator);
- if (p == nullptr) {
- // This didn't have name but was frozen already, that's an error.
- *ok = false;
- } else if (p->value == nullptr) {
- p->value = interface;
- } else {
-#ifdef DEBUG
- Nesting nested;
-#endif
- static_cast<Interface*>(p->value)->Unify(interface, zone, ok);
- }
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*sthis' = ", Nesting::current(), "");
- this->Print(Nesting::current());
- PrintF("%*s# Added.\n", Nesting::current(), "");
- }
-#endif
-}
-
-
-// ---------------------------------------------------------------------------
-// Unification.
-
-void Interface::Unify(Interface* that, Zone* zone, bool* ok) {
- if (this->forward_) return this->Chase()->Unify(that, zone, ok);
- if (that->forward_) return this->Unify(that->Chase(), zone, ok);
- DCHECK(this->forward_ == nullptr);
- DCHECK(that->forward_ == nullptr);
-
- *ok = true;
- if (this == that) return;
- if (this->IsValue()) {
- that->MakeValue(ok);
- if (*ok && this->IsConst()) that->MakeConst(ok);
- return;
- }
- if (that->IsValue()) {
- this->MakeValue(ok);
- if (*ok && that->IsConst()) this->MakeConst(ok);
- return;
- }
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*s# Unifying...\n", Nesting::current(), "");
- PrintF("%*sthis = ", Nesting::current(), "");
- this->Print(Nesting::current());
- PrintF("%*sthat = ", Nesting::current(), "");
- that->Print(Nesting::current());
- }
-#endif
-
- // Merge the smaller interface into the larger, for performance.
- if (this->exports_ != nullptr && (that->exports_ == nullptr ||
- this->exports_->occupancy() >= that->exports_->occupancy())) {
- this->DoUnify(that, ok, zone);
- } else {
- that->DoUnify(this, ok, zone);
- }
-
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("%*sthis' = ", Nesting::current(), "");
- this->Print(Nesting::current());
- PrintF("%*sthat' = ", Nesting::current(), "");
- that->Print(Nesting::current());
- PrintF("%*s# Unified.\n", Nesting::current(), "");
- }
-#endif
-}
-
-
-void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
- DCHECK(this->forward_ == nullptr);
- DCHECK(that->forward_ == nullptr);
- DCHECK(!this->IsValue());
- DCHECK(!that->IsValue());
- DCHECK(this->index_ == -1);
- DCHECK(that->index_ == -1);
- DCHECK(*ok);
-
-#ifdef DEBUG
- Nesting nested;
-#endif
-
- // Try to merge all members from that into this.
- ZoneHashMap* map = that->exports_;
- if (map != nullptr) {
- for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
- this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), zone, ok);
- if (!*ok) return;
- }
- }
-
- // If the new interface is larger than that's, then there were members in
- // 'this' which 'that' didn't have. If 'that' was frozen that is an error.
- int this_size = this->exports_ == nullptr ? 0 : this->exports_->occupancy();
- int that_size = map == nullptr ? 0 : map->occupancy();
- if (that->IsFrozen() && this_size > that_size) {
- *ok = false;
- return;
- }
-
- // Merge interfaces.
- this->flags_ |= that->flags_;
- that->forward_ = this;
-}
-
-
-// ---------------------------------------------------------------------------
-// Printing.
-
-#ifdef DEBUG
-void Interface::Print(int n) {
- int n0 = n > 0 ? n : 0;
-
- if (FLAG_print_interface_details) {
- PrintF("%p", static_cast<void*>(this));
- for (Interface* link = this->forward_; link != nullptr;
- link = link->forward_) {
- PrintF("->%p", static_cast<void*>(link));
- }
- PrintF(" ");
- }
-
- if (IsUnknown()) {
- PrintF("unknown\n");
- } else if (IsConst()) {
- PrintF("const\n");
- } else if (IsValue()) {
- PrintF("value\n");
- } else if (IsModule()) {
- PrintF("module %d %s{", Index(), IsFrozen() ? "" : "(unresolved) ");
- ZoneHashMap* map = Chase()->exports_;
- if (map == nullptr || map->occupancy() == 0) {
- PrintF("}\n");
- } else if (n < 0 || n0 >= 2 * FLAG_print_interface_depth) {
- // Avoid infinite recursion on cyclic types.
- PrintF("...}\n");
- } else {
- PrintF("\n");
- for (ZoneHashMap::Entry* p = map->Start();
- p != nullptr; p = map->Next(p)) {
- String* name = *static_cast<String**>(p->key);
- Interface* interface = static_cast<Interface*>(p->value);
- PrintF("%*s%s : ", n0 + 2, "", name->ToAsciiArray());
- interface->Print(n0 + 2);
- }
- PrintF("%*s}\n", n0, "");
- }
- }
-}
-#endif
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/interface.h b/deps/v8/src/interface.h
deleted file mode 100644
index 1843f47846..0000000000
--- a/deps/v8/src/interface.h
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERFACE_H_
-#define V8_INTERFACE_H_
-
-#include "src/ast-value-factory.h"
-#include "src/zone-inl.h" // For operator new.
-
-namespace v8 {
-namespace internal {
-
-
-// This class implements the following abstract grammar of interfaces
-// (i.e. module types):
-// interface ::= UNDETERMINED | VALUE | CONST | MODULE(exports)
-// exports ::= {name : interface, ...}
-// A frozen type is one that is fully determined. Unification does not
-// allow to turn non-const values into const, or adding additional exports to
-// frozen interfaces. Otherwise, unifying modules merges their exports.
-// Undetermined types are unification variables that can be unified freely.
-// There is a natural subsort lattice that reflects the increase of knowledge:
-//
-// undetermined
-// // | \\ .
-// value (frozen) module
-// // \\ / \ //
-// const fr.value fr.module
-// \\ /
-// fr.const
-//
-// where the bold lines are the only transitions allowed.
-
-class Interface : public ZoneObject {
- public:
- // ---------------------------------------------------------------------------
- // Factory methods.
-
- static Interface* NewUnknown(Zone* zone) {
- return new(zone) Interface(NONE);
- }
-
- static Interface* NewValue();
-
- static Interface* NewConst();
-
- static Interface* NewModule(Zone* zone) {
- return new(zone) Interface(MODULE);
- }
-
- // ---------------------------------------------------------------------------
- // Mutators.
-
- // Add a name to the list of exports. If it already exists, unify with
- // interface, otherwise insert unless this is closed.
- void Add(const AstRawString* name, Interface* interface, Zone* zone,
- bool* ok) {
- DoAdd(name, name->hash(), interface, zone, ok);
- }
-
- // Unify with another interface. If successful, both interface objects will
- // represent the same type, and changes to one are reflected in the other.
- void Unify(Interface* that, Zone* zone, bool* ok);
-
- // Determine this interface to be a value interface.
- void MakeValue(bool* ok) {
- *ok = !IsModule();
- if (*ok) Chase()->flags_ |= VALUE;
- }
-
- // Determine this interface to be an immutable interface.
- void MakeConst(bool* ok) {
- *ok = !IsModule() && (IsConst() || !IsFrozen());
- if (*ok) Chase()->flags_ |= VALUE + CONST;
- }
-
- // Determine this interface to be a module interface.
- void MakeModule(bool* ok) {
- *ok = !IsValue();
- if (*ok) Chase()->flags_ |= MODULE;
- }
-
- // Do not allow any further refinements, directly or through unification.
- void Freeze(bool* ok) {
- *ok = IsValue() || IsModule();
- if (*ok) Chase()->flags_ |= FROZEN;
- }
-
- // Assign an index.
- void Allocate(int index) {
- DCHECK(IsModule() && IsFrozen() && Chase()->index_ == -1);
- Chase()->index_ = index;
- }
-
- // ---------------------------------------------------------------------------
- // Accessors.
-
- // Check whether this is still a fully undetermined type.
- bool IsUnknown() { return Chase()->flags_ == NONE; }
-
- // Check whether this is a value type.
- bool IsValue() { return Chase()->flags_ & VALUE; }
-
- // Check whether this is a constant type.
- bool IsConst() { return Chase()->flags_ & CONST; }
-
- // Check whether this is a module type.
- bool IsModule() { return Chase()->flags_ & MODULE; }
-
- // Check whether this is closed (i.e. fully determined).
- bool IsFrozen() { return Chase()->flags_ & FROZEN; }
-
- bool IsUnified(Interface* that) {
- return Chase() == that->Chase()
- || (this->IsValue() == that->IsValue() &&
- this->IsConst() == that->IsConst());
- }
-
- int Length() {
- DCHECK(IsModule() && IsFrozen());
- ZoneHashMap* exports = Chase()->exports_;
- return exports ? exports->occupancy() : 0;
- }
-
- // The context slot in the hosting script context pointing to this module.
- int Index() {
- DCHECK(IsModule() && IsFrozen());
- return Chase()->index_;
- }
-
- // Look up an exported name. Returns NULL if not (yet) defined.
- Interface* Lookup(Handle<String> name, Zone* zone);
-
- // ---------------------------------------------------------------------------
- // Iterators.
-
- // Use like:
- // for (auto it = interface->iterator(); !it.done(); it.Advance()) {
- // ... it.name() ... it.interface() ...
- // }
- class Iterator {
- public:
- bool done() const { return entry_ == NULL; }
- const AstRawString* name() const {
- DCHECK(!done());
- return static_cast<const AstRawString*>(entry_->key);
- }
- Interface* interface() const {
- DCHECK(!done());
- return static_cast<Interface*>(entry_->value);
- }
- void Advance() { entry_ = exports_->Next(entry_); }
-
- private:
- friend class Interface;
- explicit Iterator(const ZoneHashMap* exports)
- : exports_(exports), entry_(exports ? exports->Start() : NULL) {}
-
- const ZoneHashMap* exports_;
- ZoneHashMap::Entry* entry_;
- };
-
- Iterator iterator() const { return Iterator(this->exports_); }
-
- // ---------------------------------------------------------------------------
- // Debugging.
-#ifdef DEBUG
- void Print(int n = 0); // n = indentation; n < 0 => don't print recursively
-#endif
-
- // ---------------------------------------------------------------------------
- // Implementation.
- private:
- struct Cache;
-
- enum Flags { // All flags are monotonic
- NONE = 0,
- VALUE = 1, // This type describes a value
- CONST = 2, // This type describes a constant
- MODULE = 4, // This type describes a module
- FROZEN = 8 // This type is fully determined
- };
-
- int flags_;
- Interface* forward_; // Unification link
- ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
- int index_;
-
- explicit Interface(int flags)
- : flags_(flags),
- forward_(NULL),
- exports_(NULL),
- index_(-1) {
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Creating %p\n", static_cast<void*>(this));
-#endif
- }
-
- Interface* Chase() {
- Interface* result = this;
- while (result->forward_ != NULL) result = result->forward_;
- if (result != this) forward_ = result; // On-the-fly path compression.
- return result;
- }
-
- void DoAdd(const void* name, uint32_t hash, Interface* interface, Zone* zone,
- bool* ok);
- void DoUnify(Interface* that, bool* ok, Zone* zone);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_INTERFACE_H_
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index b44c4d6d72..d2342794ee 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -25,11 +25,6 @@ SaveContext::SaveContext(Isolate* isolate)
}
-bool Isolate::DebuggerHasBreakPoints() {
- return debug()->has_break_points();
-}
-
-
base::RandomNumberGenerator* Isolate::random_number_generator() {
if (random_number_generator_ == NULL) {
if (FLAG_random_seed != 0) {
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index b24182b6a0..2c8367d7a1 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -83,7 +83,6 @@ void ThreadLocalTop::InitializeInternal() {
failed_access_check_callback_ = NULL;
save_context_ = NULL;
catcher_ = NULL;
- top_lookup_result_ = NULL;
promise_on_stack_ = NULL;
// These members are re-initialized later after deserialization
@@ -207,9 +206,6 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
it.frame()->Iterate(v);
}
-
- // Iterate pointers in live lookup results.
- thread->top_lookup_result_->Iterate(v);
}
@@ -404,7 +400,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
// mode function. The number of sloppy frames is stored as
// first element in the result array.
if (!encountered_strict_function) {
- if (fun->shared()->strict_mode() == STRICT) {
+ if (is_strict(fun->shared()->language_mode())) {
encountered_strict_function = true;
} else {
sloppy_frames++;
@@ -1063,12 +1059,39 @@ void Isolate::ComputeLocation(MessageLocation* target) {
int pos = frame->LookupCode()->SourcePosition(frame->pc());
// Compute the location from the function and the reloc info.
Handle<Script> casted_script(Script::cast(script));
- *target = MessageLocation(casted_script, pos, pos + 1);
+ *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
}
}
}
+bool Isolate::ComputeLocationFromException(MessageLocation* target,
+ Handle<Object> exception) {
+ if (!exception->IsJSObject()) return false;
+
+ Handle<Name> start_pos_symbol = factory()->error_start_pos_symbol();
+ Handle<Object> start_pos = JSObject::GetDataProperty(
+ Handle<JSObject>::cast(exception), start_pos_symbol);
+ if (!start_pos->IsSmi()) return false;
+ int start_pos_value = Handle<Smi>::cast(start_pos)->value();
+
+ Handle<Name> end_pos_symbol = factory()->error_end_pos_symbol();
+ Handle<Object> end_pos = JSObject::GetDataProperty(
+ Handle<JSObject>::cast(exception), end_pos_symbol);
+ if (!end_pos->IsSmi()) return false;
+ int end_pos_value = Handle<Smi>::cast(end_pos)->value();
+
+ Handle<Name> script_symbol = factory()->error_script_symbol();
+ Handle<Object> script = JSObject::GetDataProperty(
+ Handle<JSObject>::cast(exception), script_symbol);
+ if (!script->IsScript()) return false;
+
+ Handle<Script> cast_script(Script::cast(*script));
+ *target = MessageLocation(cast_script, start_pos_value, end_pos_value);
+ return true;
+}
+
+
bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<Object> exception) {
*target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
@@ -1181,9 +1204,12 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
}
}
if (!location) {
- if (!ComputeLocationFromStackTrace(&potential_computed_location,
- exception)) {
- ComputeLocation(&potential_computed_location);
+ if (!ComputeLocationFromException(&potential_computed_location,
+ exception)) {
+ if (!ComputeLocationFromStackTrace(&potential_computed_location,
+ exception)) {
+ ComputeLocation(&potential_computed_location);
+ }
}
location = &potential_computed_location;
}
@@ -1560,7 +1586,7 @@ Isolate::ThreadDataTable::~ThreadDataTable() {
// TODO(svenpanne) The assertion below would fire if an embedder does not
// cleanly dispose all Isolates before disposing v8, so we are conservative
// and leave it out for now.
- // DCHECK_EQ(NULL, list_);
+ // DCHECK_NULL(list_);
}
@@ -1644,7 +1670,6 @@ Isolate::Isolate(bool enable_serializer)
descriptor_lookup_cache_(NULL),
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
- runtime_zone_(this),
inner_pointer_to_code_cache_(NULL),
global_handles_(NULL),
eternal_handles_(NULL),
@@ -1657,6 +1682,8 @@ Isolate::Isolate(bool enable_serializer)
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
+ store_buffer_hash_set_1_address_(NULL),
+ store_buffer_hash_set_2_address_(NULL),
serializer_enabled_(enable_serializer),
has_fatal_error_(false),
initialized_from_snapshot_(false),
@@ -1875,6 +1902,9 @@ Isolate::~Isolate() {
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
+ delete code_tracer();
+ set_code_tracer(NULL);
+
delete compilation_cache_;
compilation_cache_ = NULL;
delete bootstrapper_;
@@ -2025,8 +2055,8 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
Simulator::Initialize(this);
#endif
#endif
@@ -2113,19 +2143,6 @@ bool Isolate::Init(Deserializer* des) {
std::ofstream(GetTurboCfgFileName().c_str(), std::ios_base::trunc);
}
- // If we are deserializing, log non-function code objects and compiled
- // functions found in the snapshot.
- if (!create_heap_objects &&
- (FLAG_log_code ||
- FLAG_ll_prof ||
- FLAG_perf_jit_prof ||
- FLAG_perf_basic_prof ||
- logger_->is_logging_code_events())) {
- HandleScope scope(this);
- LOG(this, LogCodeObjects());
- LOG(this, LogCompiledFunctions());
- }
-
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
@@ -2550,6 +2567,57 @@ std::string Isolate::GetTurboCfgFileName() {
}
+// Heap::detached_contexts tracks detached contexts as pairs
+// (number of GC since the context was detached, the context).
+void Isolate::AddDetachedContext(Handle<Context> context) {
+ HandleScope scope(this);
+ Handle<WeakCell> cell = factory()->NewWeakCell(context);
+ Handle<FixedArray> detached_contexts(heap()->detached_contexts());
+ int length = detached_contexts->length();
+ detached_contexts = FixedArray::CopySize(detached_contexts, length + 2);
+ detached_contexts->set(length, Smi::FromInt(0));
+ detached_contexts->set(length + 1, *cell);
+ heap()->set_detached_contexts(*detached_contexts);
+}
+
+
+void Isolate::CheckDetachedContextsAfterGC() {
+ HandleScope scope(this);
+ Handle<FixedArray> detached_contexts(heap()->detached_contexts());
+ int length = detached_contexts->length();
+ if (length == 0) return;
+ int new_length = 0;
+ for (int i = 0; i < length; i += 2) {
+ int mark_sweeps = Smi::cast(detached_contexts->get(i))->value();
+ WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
+ if (!cell->cleared()) {
+ detached_contexts->set(new_length, Smi::FromInt(mark_sweeps + 1));
+ detached_contexts->set(new_length + 1, cell);
+ new_length += 2;
+ }
+ counters()->detached_context_age_in_gc()->AddSample(mark_sweeps + 1);
+ }
+ if (FLAG_trace_detached_contexts) {
+ PrintF("%d detached contexts are collected out of %d\n",
+ length - new_length, length);
+ for (int i = 0; i < new_length; i += 2) {
+ int mark_sweeps = Smi::cast(detached_contexts->get(i))->value();
+ WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
+ if (mark_sweeps > 3) {
+ PrintF("detached context 0x%p\n survived %d GCs (leak?)\n",
+ static_cast<void*>(cell->value()), mark_sweeps);
+ }
+ }
+ }
+ if (new_length == 0) {
+ heap()->set_detached_contexts(heap()->empty_fixed_array());
+ } else if (new_length < length) {
+ heap()->RightTrimFixedArray<Heap::FROM_GC>(*detached_contexts,
+ length - new_length);
+ }
+}
+
+
bool StackLimitCheck::JsHasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 42a814ad72..fdd1832888 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -82,9 +82,10 @@ class Debug;
class Debugger;
class PromiseOnStack;
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
class Redirection;
class Simulator;
@@ -312,9 +313,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
- // Head of the list of live LookupResults.
- LookupResult* top_lookup_result_;
-
private:
void InitializeInternal();
@@ -322,9 +320,10 @@ class ThreadLocalTop BASE_EMBEDDED {
};
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
+ V8_TARGET_ARCH_PPC && !defined(__PPC__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
@@ -417,9 +416,10 @@ class Isolate {
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
simulator_(NULL),
#endif
@@ -432,9 +432,10 @@ class Isolate {
FIELD_ACCESSOR(uintptr_t, stack_limit)
FIELD_ACCESSOR(ThreadState*, thread_state)
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
FIELD_ACCESSOR(Simulator*, simulator)
#endif
@@ -449,9 +450,10 @@ class Isolate {
uintptr_t stack_limit_;
ThreadState* thread_state_;
-#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
!defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
- !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__PPC__) && V8_TARGET_ARCH_PPC || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS64
Simulator* simulator_;
#endif
@@ -804,6 +806,8 @@ class Isolate {
// Attempts to compute the current source location, storing the
// result in the target out parameter.
void ComputeLocation(MessageLocation* target);
+ bool ComputeLocationFromException(MessageLocation* target,
+ Handle<Object> exception);
bool ComputeLocationFromStackTrace(MessageLocation* target,
Handle<Object> exception);
@@ -973,8 +977,6 @@ class Isolate {
Debug* debug() { return debug_; }
- inline bool DebuggerHasBreakPoints();
-
CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
HeapProfiler* heap_profiler() const { return heap_profiler_; }
@@ -1003,8 +1005,6 @@ class Isolate {
return embedder_data_[slot];
}
- THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
-
bool serializer_enabled() const { return serializer_enabled_; }
bool IsDead() { return has_fatal_error_; }
@@ -1118,6 +1118,27 @@ class Isolate {
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif
+ void set_store_buffer_hash_set_1_address(
+ uintptr_t* store_buffer_hash_set_1_address) {
+ store_buffer_hash_set_1_address_ = store_buffer_hash_set_1_address;
+ }
+
+ uintptr_t* store_buffer_hash_set_1_address() {
+ return store_buffer_hash_set_1_address_;
+ }
+
+ void set_store_buffer_hash_set_2_address(
+ uintptr_t* store_buffer_hash_set_2_address) {
+ store_buffer_hash_set_2_address_ = store_buffer_hash_set_2_address;
+ }
+
+ uintptr_t* store_buffer_hash_set_2_address() {
+ return store_buffer_hash_set_2_address_;
+ }
+
+ void AddDetachedContext(Handle<Context> context);
+ void CheckDetachedContextsAfterGC();
+
private:
explicit Isolate(bool enable_serializer);
@@ -1270,6 +1291,9 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
+ // TODO(hpayer): Remove the following store buffer addresses.
+ uintptr_t* store_buffer_hash_set_1_address_;
+ uintptr_t* store_buffer_hash_set_2_address_;
// Whether the isolate has been created for snapshotting.
bool serializer_enabled_;
@@ -1336,9 +1360,9 @@ class Isolate {
v8::Isolate::UseCounterCallback use_counter_callback_;
BasicBlockProfiler* basic_block_profiler_;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
- friend class IsolateInitializer;
friend class OptimizingCompilerThread;
friend class SweeperThread;
friend class ThreadManager;
@@ -1532,7 +1556,7 @@ class CodeTracer FINAL : public Malloced {
}
if (file_ == NULL) {
- file_ = base::OS::FOpen(filename_.start(), "a");
+ file_ = base::OS::FOpen(filename_.start(), "ab");
}
scope_depth_++;
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 5ebbcdd861..9a22738e98 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -32,7 +32,6 @@ class JsonParser BASE_EMBEDDED {
source_length_(source->length()),
isolate_(source->map()->GetHeap()->isolate()),
factory_(isolate_->factory()),
- zone_(isolate_),
object_constructor_(isolate_->native_context()->object_function(),
isolate_),
position_(-1) {
@@ -109,10 +108,8 @@ class JsonParser BASE_EMBEDDED {
const uint8_t* expected_chars = content.ToOneByteVector().start();
for (int i = 0; i < length; i++) {
uint8_t c0 = input_chars[i];
- if (c0 != expected_chars[i] ||
- c0 == '"' || c0 < 0x20 || c0 == '\\') {
- return false;
- }
+ // The expected string has to be free of \, " and characters < 0x20.
+ if (c0 != expected_chars[i]) return false;
}
if (input_chars[length] == '"') {
position_ = position_ + length + 1;
@@ -553,10 +550,9 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
double number;
if (seq_one_byte) {
Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
- number = StringToDouble(isolate()->unicode_cache(),
- chars,
+ number = StringToDouble(isolate()->unicode_cache(), chars,
NO_FLAGS, // Hex, octal or trailing junk.
- base::OS::nan_value());
+ std::numeric_limits<double>::quiet_NaN());
} else {
Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 393551dd9a..d7caefc9e9 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -539,7 +539,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
PropertyDetails details = map->instance_descriptors()->GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
- if (details.type() == FIELD && *map == object->map()) {
+ if (details.type() == DATA && *map == object->map()) {
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
Isolate* isolate = object->GetIsolate();
if (object->IsUnboxedDoubleField(field_index)) {
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 81ad080da5..bb7ad60414 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -31,6 +31,8 @@
#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
@@ -136,7 +138,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags) {
Isolate* isolate = re->GetIsolate();
- Zone zone(isolate);
+ Zone zone;
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<FixedArray> maybe_cached =
compilation_cache->LookupRegExp(pattern, flags);
@@ -153,8 +155,9 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &parse_result, &zone)) {
+ if (!RegExpParser::ParseRegExp(re->GetIsolate(), &zone, &reader,
+ flags.is_multiline(), flags.is_unicode(),
+ &parse_result)) {
// Throw an exception if we fail to parse the pattern.
return ThrowRegExpException(re,
pattern,
@@ -369,7 +372,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
bool is_one_byte) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- Zone zone(isolate);
+ Zone zone;
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
@@ -400,9 +403,8 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
pattern = String::Flatten(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &compile_data,
- &zone)) {
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags.is_multiline(),
+ flags.is_unicode(), &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
USE(ThrowRegExpException(re,
@@ -412,9 +414,9 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
return false;
}
RegExpEngine::CompilationResult result = RegExpEngine::Compile(
- &compile_data, flags.is_ignore_case(), flags.is_global(),
+ isolate, &zone, &compile_data, flags.is_ignore_case(), flags.is_global(),
flags.is_multiline(), flags.is_sticky(), pattern, sample_subject,
- is_one_byte, &zone);
+ is_one_byte);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
@@ -970,8 +972,8 @@ class FrequencyCollator {
class RegExpCompiler {
public:
- RegExpCompiler(int capture_count, bool ignore_case, bool is_one_byte,
- Zone* zone);
+ RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
+ bool ignore_case, bool is_one_byte);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -1013,6 +1015,7 @@ class RegExpCompiler {
current_expansion_factor_ = value;
}
+ Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
static const int kNoRegister = -1;
@@ -1029,6 +1032,7 @@ class RegExpCompiler {
bool optimize_;
int current_expansion_factor_;
FrequencyCollator frequency_collator_;
+ Isolate* isolate_;
Zone* zone_;
};
@@ -1051,8 +1055,8 @@ static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case,
- bool one_byte, Zone* zone)
+RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
+ bool ignore_case, bool one_byte)
: next_register_(2 * (capture_count + 1)),
work_list_(NULL),
recursion_depth_(0),
@@ -1062,6 +1066,7 @@ RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case,
optimize_(FLAG_regexp_optimization),
current_expansion_factor_(1),
frequency_collator_(),
+ isolate_(isolate),
zone_(zone) {
accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
DCHECK(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
@@ -1077,7 +1082,8 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
#ifdef DEBUG
if (FLAG_trace_regexp_assembler)
- macro_assembler_ = new RegExpMacroAssemblerTracer(macro_assembler);
+ macro_assembler_ =
+ new RegExpMacroAssemblerTracer(isolate(), macro_assembler);
else
#endif
macro_assembler_ = macro_assembler;
@@ -1093,7 +1099,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
while (!work_list.is_empty()) {
work_list.RemoveLast()->Emit(this, &new_trace);
}
- if (reg_exp_too_big_) return IrregexpRegExpTooBig(zone_->isolate());
+ if (reg_exp_too_big_) return IrregexpRegExpTooBig(isolate_);
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
heap->IncreaseTotalRegexpCodeGenerated(code->Size());
@@ -1822,7 +1828,7 @@ static void EmitUseLookupTable(
for (int i = j; i < kSize; i++) {
templ[i] = bit;
}
- Factory* factory = masm->zone()->isolate()->factory();
+ Factory* factory = masm->isolate()->factory();
// TODO(erikcorry): Cache these.
Handle<ByteArray> ba = factory->NewByteArray(kSize, TENURED);
for (int i = 0; i < kSize; i++) {
@@ -2501,7 +2507,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
- Isolate* isolate = compiler->macro_assembler()->zone()->isolate();
+ Isolate* isolate = compiler->macro_assembler()->isolate();
DCHECK(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
@@ -3202,7 +3208,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
bool first_element_checked,
int* checked_up_to) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Isolate* isolate = assembler->zone()->isolate();
+ Isolate* isolate = assembler->isolate();
bool one_byte = compiler->one_byte();
Label* backtrack = trace->backtrack();
QuickCheckDetails* quick_check = trace->quick_check_performed();
@@ -3363,7 +3369,7 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
}
-void TextNode::MakeCaseIndependent(bool is_one_byte) {
+void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
@@ -3375,7 +3381,7 @@ void TextNode::MakeCaseIndependent(bool is_one_byte) {
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
int range_count = ranges->length();
for (int j = 0; j < range_count; j++) {
- ranges->at(j).AddCaseEquivalents(ranges, is_one_byte, zone());
+ ranges->at(j).AddCaseEquivalents(isolate, zone(), ranges, is_one_byte);
}
}
}
@@ -3440,14 +3446,14 @@ int ChoiceNode::GreedyLoopTextLengthForAlternative(
void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
- DCHECK_EQ(loop_node_, NULL);
+ DCHECK_NULL(loop_node_);
AddAlternative(alt);
loop_node_ = alt.node();
}
void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
- DCHECK_EQ(continue_node_, NULL);
+ DCHECK_NULL(continue_node_);
AddAlternative(alt);
continue_node_ = alt.node();
}
@@ -3467,7 +3473,7 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
macro_assembler->GoTo(trace->loop_label());
return;
}
- DCHECK(trace->stop_node() == NULL);
+ DCHECK_NULL(trace->stop_node());
if (!trace->is_trivial()) {
trace->Flush(compiler, this);
return;
@@ -3774,7 +3780,7 @@ void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
return;
}
- Factory* factory = masm->zone()->isolate()->factory();
+ Factory* factory = masm->isolate()->factory();
Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED);
int skip_distance = GetSkipTable(
min_lookahead, max_lookahead, boolean_skip_table);
@@ -5288,8 +5294,8 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
ZoneList<CharacterRange>** included,
ZoneList<CharacterRange>** excluded,
Zone* zone) {
- DCHECK_EQ(NULL, *included);
- DCHECK_EQ(NULL, *excluded);
+ DCHECK_NULL(*included);
+ DCHECK_NULL(*excluded);
DispatchTable table(zone);
for (int i = 0; i < base->length(); i++)
table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone);
@@ -5302,9 +5308,9 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
}
-void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
- bool is_one_byte, Zone* zone) {
- Isolate* isolate = zone->isolate();
+void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool is_one_byte) {
uc16 bottom = from();
uc16 top = to();
if (is_one_byte && !RangeContainsLatin1Equivalents(*this)) {
@@ -5592,7 +5598,9 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
if (tree()->is_empty()) {
// If this is the first range we just insert into the table.
ZoneSplayTree<Config>::Locator loc;
- DCHECK_RESULT(tree()->Insert(current.from(), &loc));
+ bool inserted = tree()->Insert(current.from(), &loc);
+ DCHECK(inserted);
+ USE(inserted);
loc.set_value(Entry(current.from(), current.to(),
empty()->Extend(value, zone)));
return;
@@ -5618,7 +5626,9 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
// to the map and let the next step deal with merging it with
// the range we're adding.
ZoneSplayTree<Config>::Locator loc;
- DCHECK_RESULT(tree()->Insert(right.from(), &loc));
+ bool inserted = tree()->Insert(right.from(), &loc);
+ DCHECK(inserted);
+ USE(inserted);
loc.set_value(Entry(right.from(),
right.to(),
entry->out_set()));
@@ -5634,7 +5644,9 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
// then we have to add a range covering just that space.
if (current.from() < entry->from()) {
ZoneSplayTree<Config>::Locator ins;
- DCHECK_RESULT(tree()->Insert(current.from(), &ins));
+ bool inserted = tree()->Insert(current.from(), &ins);
+ DCHECK(inserted);
+ USE(inserted);
ins.set_value(Entry(current.from(),
entry->from() - 1,
empty()->Extend(value, zone)));
@@ -5645,7 +5657,9 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
// we have to snap the right part off and add it separately.
if (entry->to() > current.to()) {
ZoneSplayTree<Config>::Locator ins;
- DCHECK_RESULT(tree()->Insert(current.to() + 1, &ins));
+ bool inserted = tree()->Insert(current.to() + 1, &ins);
+ DCHECK(inserted);
+ USE(inserted);
ins.set_value(Entry(current.to() + 1,
entry->to(),
entry->out_set()));
@@ -5665,7 +5679,9 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
} else {
// There is no overlap so we can just add the range
ZoneSplayTree<Config>::Locator ins;
- DCHECK_RESULT(tree()->Insert(current.from(), &ins));
+ bool inserted = tree()->Insert(current.from(), &ins);
+ DCHECK(inserted);
+ USE(inserted);
ins.set_value(Entry(current.from(),
current.to(),
empty()->Extend(value, zone)));
@@ -5692,7 +5708,7 @@ OutSet* DispatchTable::Get(uc16 value) {
void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(that->zone()->isolate());
+ StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
fail("Stack overflow");
return;
@@ -5726,7 +5742,7 @@ void TextNode::CalculateOffsets() {
void Analysis::VisitText(TextNode* that) {
if (ignore_case_) {
- that->MakeCaseIndependent(is_one_byte_);
+ that->MakeCaseIndependent(isolate(), is_one_byte_);
}
EnsureAnalyzed(that->on_success());
if (!has_failed()) {
@@ -6002,13 +6018,14 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
RegExpEngine::CompilationResult RegExpEngine::Compile(
- RegExpCompileData* data, bool ignore_case, bool is_global,
- bool is_multiline, bool is_sticky, Handle<String> pattern,
- Handle<String> sample_subject, bool is_one_byte, Zone* zone) {
+ Isolate* isolate, Zone* zone, RegExpCompileData* data, bool ignore_case,
+ bool is_global, bool is_multiline, bool is_sticky, Handle<String> pattern,
+ Handle<String> sample_subject, bool is_one_byte) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig(zone->isolate());
+ return IrregexpRegExpTooBig(isolate);
}
- RegExpCompiler compiler(data->capture_count, ignore_case, is_one_byte, zone);
+ RegExpCompiler compiler(isolate, zone, data->capture_count, ignore_case,
+ is_one_byte);
compiler.set_optimize(!TooMuchRegExpCode(pattern));
@@ -6068,11 +6085,11 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
data->node = node;
- Analysis analysis(ignore_case, is_one_byte);
+ Analysis analysis(isolate, ignore_case, is_one_byte);
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
- return CompilationResult(zone->isolate(), error_message);
+ return CompilationResult(isolate, error_message);
}
// Create the correct assembler for the architecture.
@@ -6084,26 +6101,29 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
: NativeRegExpMacroAssembler::UC16;
#if V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+ RegExpMacroAssemblerIA32 macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_X64
- RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+ RegExpMacroAssemblerX64 macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_ARM
- RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+ RegExpMacroAssemblerARM macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_ARM64
- RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+ RegExpMacroAssemblerARM64 macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_PPC
+ RegExpMacroAssemblerPPC macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_MIPS
- RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+ RegExpMacroAssemblerMIPS macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_MIPS64
- RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+ RegExpMacroAssemblerMIPS macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_X87
- RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+ RegExpMacroAssemblerX87 macro_assembler(isolate, zone, mode,
+ (data->capture_count + 1) * 2);
#else
#error "Unsupported architecture"
#endif
@@ -6111,7 +6131,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#else // V8_INTERPRETED_REGEXP
// Interpreted regexp implementation.
EmbeddedVector<byte, 1024> codes;
- RegExpMacroAssemblerIrregexp macro_assembler(codes, zone);
+ RegExpMacroAssemblerIrregexp macro_assembler(isolate, codes, zone);
#endif // V8_INTERPRETED_REGEXP
macro_assembler.set_slow_safe(TooMuchRegExpCode(pattern));
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index 4b84c9558d..0b4f39dc49 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -7,7 +7,6 @@
#include "src/allocation.h"
#include "src/assembler.h"
-#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -240,7 +239,7 @@ class CharacterRange {
public:
CharacterRange() : from_(0), to_(0) { }
// For compatibility with the CHECK_OK macro
- CharacterRange(void* null) { DCHECK_EQ(NULL, null); } //NOLINT
+ CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT
CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
Zone* zone);
@@ -263,8 +262,8 @@ class CharacterRange {
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_one_byte,
- Zone* zone);
+ void AddCaseEquivalents(Isolate* isolate, Zone* zone,
+ ZoneList<CharacterRange>* ranges, bool is_one_byte);
static void Split(ZoneList<CharacterRange>* base,
Vector<const int> overlay,
ZoneList<CharacterRange>** included,
@@ -847,7 +846,7 @@ class TextNode: public SeqRegExpNode {
int characters_filled_in,
bool not_at_start);
ZoneList<TextElement>* elements() { return elms_; }
- void MakeCaseIndependent(bool is_one_byte);
+ void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
@@ -1598,8 +1597,9 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
// +-------+ +------------+
class Analysis: public NodeVisitor {
public:
- Analysis(bool ignore_case, bool is_one_byte)
- : ignore_case_(ignore_case),
+ Analysis(Isolate* isolate, bool ignore_case, bool is_one_byte)
+ : isolate_(isolate),
+ ignore_case_(ignore_case),
is_one_byte_(is_one_byte),
error_message_(NULL) {}
void EnsureAnalyzed(RegExpNode* node);
@@ -1619,7 +1619,10 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
error_message_ = error_message;
}
+ Isolate* isolate() const { return isolate_; }
+
private:
+ Isolate* isolate_;
bool ignore_case_;
bool is_one_byte_;
const char* error_message_;
@@ -1652,19 +1655,18 @@ class RegExpEngine: public AllStatic {
code(isolate->heap()->the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
- : error_message(NULL),
- code(code),
- num_registers(registers) {}
+ : error_message(NULL), code(code), num_registers(registers) {}
const char* error_message;
Object* code;
int num_registers;
};
- static CompilationResult Compile(RegExpCompileData* input, bool ignore_case,
+ static CompilationResult Compile(Isolate* isolate, Zone* zone,
+ RegExpCompileData* input, bool ignore_case,
bool global, bool multiline, bool sticky,
Handle<String> pattern,
Handle<String> sample_subject,
- bool is_one_byte, Zone* zone);
+ bool is_one_byte);
static bool TooMuchRegExpCode(Handle<String> pattern);
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index 33523127ee..ceee09a810 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -39,7 +39,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
PropertyDetails details) {
- if (details.type() != FIELD || !details.representation().IsDouble()) {
+ if (details.type() != DATA || !details.representation().IsDouble()) {
return false;
}
// We care only about in-object properties.
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 77b8ec4d1f..121836c173 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -72,13 +72,16 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(
}
-Handle<LayoutDescriptor> LayoutDescriptor::Append(Handle<Map> map,
- PropertyDetails details) {
+Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
+ Handle<Map> map, PropertyDetails details) {
+ DCHECK(map->owns_descriptors());
Isolate* isolate = map->GetIsolate();
Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
isolate);
if (!InobjectUnboxedField(map->inobject_properties(), details)) {
+ DCHECK(details.location() != kField ||
+ layout_descriptor->IsTagged(details.field_index()));
return layout_descriptor;
}
int field_index = details.field_index();
@@ -104,6 +107,8 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
return full_layout_descriptor;
}
if (!InobjectUnboxedField(map->inobject_properties(), details)) {
+ DCHECK(details.location() != kField ||
+ layout_descriptor->IsTagged(details.field_index()));
return handle(layout_descriptor, map->GetIsolate());
}
int field_index = details.field_index();
@@ -127,7 +132,6 @@ Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
int new_capacity) {
int old_capacity = layout_descriptor->capacity();
if (new_capacity <= old_capacity) {
- // Nothing to do with layout in Smi-form.
return layout_descriptor;
}
Handle<LayoutDescriptor> new_layout_descriptor =
@@ -252,5 +256,26 @@ bool LayoutDescriptorHelper::IsTagged(
DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
return tagged;
}
+
+
+bool LayoutDescriptor::IsConsistentWithMap(Map* map) {
+ if (FLAG_unbox_double_fields) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int nof_descriptors = map->NumberOfOwnDescriptors();
+ for (int i = 0; i < nof_descriptors; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != DATA) continue;
+ FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
+ bool tagged_expected =
+ !field_index.is_inobject() || !details.representation().IsDouble();
+ for (int bit = 0; bit < details.field_width_in_words(); bit++) {
+ bool tagged_actual = IsTagged(details.field_index() + bit);
+ DCHECK_EQ(tagged_expected, tagged_actual);
+ if (tagged_actual != tagged_expected) return false;
+ }
+ }
+ }
+ return true;
+}
}
} // namespace v8::internal
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index cc2666a487..8f2942c0eb 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -53,10 +53,10 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
Handle<DescriptorArray> descriptors,
int num_descriptors);
- // Creates new layout descriptor by appending property with |details| to
- // |map|'s layout descriptor.
- static Handle<LayoutDescriptor> Append(Handle<Map> map,
- PropertyDetails details);
+ // Modifies |map|'s layout descriptor or creates a new one if necessary by
+ // appending property with |details| to it.
+ static Handle<LayoutDescriptor> ShareAppend(Handle<Map> map,
+ PropertyDetails details);
// Creates new layout descriptor by appending property with |details| to
// |map|'s layout descriptor and if it is still fast then returns it.
@@ -69,10 +69,8 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
// tagged (FastPointerLayout).
V8_INLINE static LayoutDescriptor* FastPointerLayout();
-#ifdef DEBUG
// Check that this layout descriptor corresponds to given map.
bool IsConsistentWithMap(Map* map);
-#endif
#ifdef OBJECT_PRINT
// For our gdb macros, we should perhaps change these in the future.
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index c17c4ecfee..021cafe146 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -63,7 +63,7 @@ class List {
// backing store (e.g. Add).
inline T& operator[](int i) const {
DCHECK(0 <= i);
- SLOW_DCHECK(i < length_);
+ SLOW_DCHECK(static_cast<unsigned>(i) < static_cast<unsigned>(length_));
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index bafa00f07b..98923ae3aa 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -15,6 +15,8 @@
#include "src/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 5f4f17f16f..36c8c7d90a 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -511,8 +511,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
LAllocator::LAllocator(int num_values, HGraph* graph)
- : zone_(graph->isolate()),
- chunk_(NULL),
+ : chunk_(NULL),
live_in_sets_(graph->blocks()->length(), zone()),
live_ranges_(num_values * 2, zone()),
fixed_live_ranges_(NULL),
@@ -2175,8 +2174,8 @@ LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator)
LAllocatorPhase::~LAllocatorPhase() {
if (FLAG_hydrogen_stats) {
- unsigned size = allocator_->zone()->allocation_size() -
- allocator_zone_start_allocation_size_;
+ size_t size = allocator_->zone()->allocation_size() -
+ allocator_zone_start_allocation_size_;
isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size);
}
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index f63077e19d..2a8080011f 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -564,7 +564,7 @@ class LAllocatorPhase : public CompilationPhase {
private:
LAllocator* allocator_;
- unsigned allocator_zone_start_allocation_size_;
+ size_t allocator_zone_start_allocation_size_;
DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
};
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 4534b46d80..242db222f2 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -29,6 +29,9 @@
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
@@ -149,12 +152,8 @@ void LCodeGenBase::Comment(const char* format, ...) {
}
-void LCodeGenBase::DeoptComment(const Deoptimizer::Reason& reason) {
- std::ostringstream os;
- os << ";;; deoptimize at " << HSourcePosition(reason.raw_position) << " "
- << reason.mnemonic;
- if (reason.detail != NULL) os << ": " << reason.detail;
- Comment("%s", os.str().c_str());
+void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
+ masm()->RecordDeoptReason(deopt_info.deopt_reason, deopt_info.raw_position);
}
@@ -167,66 +166,6 @@ int LCodeGenBase::GetNextEmittedBlock() const {
}
-static void AddWeakObjectToCodeDependency(Isolate* isolate,
- Handle<Object> object,
- Handle<Code> code) {
- Heap* heap = isolate->heap();
- heap->EnsureWeakObjectToCodeTable();
- Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
- dep = DependentCode::Insert(dep, DependentCode::kWeakCodeGroup, code);
- heap->AddWeakObjectToCodeDependency(object, dep);
-}
-
-
-void LCodeGenBase::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
- DCHECK(code->is_optimized_code());
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- ZoneList<Handle<Cell> > cells(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::CELL &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
- Handle<Cell> cell(it.rinfo()->target_cell());
- cells.Add(cell, zone());
- } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- } else if (it.rinfo()->target_object()->IsCell()) {
- Handle<Cell> cell(Cell::cast(it.rinfo()->target_object()));
- cells.Add(cell, zone());
- }
- }
- }
- if (FLAG_enable_ool_constant_pool) {
- code->constant_pool()->set_weak_object_state(
- ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE);
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- Map::AddDependentCode(maps.at(i), DependentCode::kWeakCodeGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate(), objects.at(i), code);
- }
- for (int i = 0; i < cells.length(); i++) {
- AddWeakObjectToCodeDependency(isolate(), cells.at(i), code);
- }
-}
-
-
void LCodeGenBase::Abort(BailoutReason reason) {
info()->AbortOptimization(reason);
status_ = ABORTED;
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index 40d4d8e4f8..17bf78cac0 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -35,7 +35,7 @@ class LCodeGenBase BASE_EMBEDDED {
HGraph* graph() const;
void FPRINTF_CHECKING Comment(const char* format, ...);
- void DeoptComment(const Deoptimizer::Reason& reason);
+ void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
bool GenerateBody();
virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
diff --git a/deps/v8/src/lithium-inl.h b/deps/v8/src/lithium-inl.h
index 36e166e926..1a10773390 100644
--- a/deps/v8/src/lithium-inl.h
+++ b/deps/v8/src/lithium-inl.h
@@ -19,6 +19,8 @@
#include "src/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/lithium-x87.h" // NOLINT
#else
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index d57a2dd4ac..c15e9dbee2 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -18,6 +18,9 @@
#elif V8_TARGET_ARCH_ARM
#include "src/arm/lithium-arm.h" // NOLINT
#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/lithium-mips.h" // NOLINT
#include "src/mips/lithium-codegen-mips.h" // NOLINT
@@ -410,7 +413,58 @@ Representation LChunk::LookupLiteralRepresentation(
}
+static void AddWeakObjectToCodeDependency(Isolate* isolate,
+ Handle<HeapObject> object,
+ Handle<Code> code) {
+ Handle<WeakCell> cell = Code::WeakCellFor(code);
+ Heap* heap = isolate->heap();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
+ dep = DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
+ heap->AddWeakObjectToCodeDependency(object, dep);
+}
+
+
+void LChunk::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const {
+ DCHECK(code->is_optimized_code());
+ ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<HeapObject> > objects(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::CELL &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
+ objects.Add(Handle<HeapObject>(it.rinfo()->target_cell()), zone());
+ } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ maps.Add(map, zone());
+ } else {
+ Handle<HeapObject> object(
+ HeapObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
+ }
+ }
+ }
+ for (int i = 0; i < maps.length(); i++) {
+ Map::AddDependentCode(maps.at(i), DependentCode::kWeakCodeGroup, code);
+ }
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate(), objects.at(i), code);
+ }
+ if (FLAG_enable_ool_constant_pool) {
+ code->constant_pool()->set_weak_object_state(
+ ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE);
+ }
+ code->set_can_have_weak_objects(true);
+}
+
+
void LChunk::CommitDependencies(Handle<Code> code) const {
+ if (!code->is_optimized_code()) return;
+ HandleScope scope(isolate());
+
for (MapSet::const_iterator it = deprecation_dependencies_.begin(),
iend = deprecation_dependencies_.end(); it != iend; ++it) {
Handle<Map> map = *it;
@@ -428,6 +482,7 @@ void LChunk::CommitDependencies(Handle<Code> code) const {
}
info_->CommitDependencies(code);
+ RegisterWeakObjectsInOptimizedCode(code);
}
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 83f760d672..991bcf034b 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -683,6 +683,7 @@ class LChunk : public ZoneObject {
typedef zone_allocator<Handle<Map> > MapAllocator;
typedef std::set<Handle<Map>, MapLess, MapAllocator> MapSet;
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const;
void CommitDependencies(Handle<Code> code) const;
CompilationInfo* info_;
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 29eaa974a6..8da3d52f55 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -1748,13 +1748,12 @@ class MultipleFunctionTarget {
// Drops all call frame matched by target and all frames above them.
-template<typename TARGET>
-static const char* DropActivationsInActiveThreadImpl(
- Isolate* isolate,
- TARGET& target, // NOLINT
- bool do_drop) {
+template <typename TARGET>
+static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
+ TARGET& target, // NOLINT
+ bool do_drop) {
Debug* debug = isolate->debug();
- Zone zone(isolate);
+ Zone zone;
Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
@@ -1858,14 +1857,14 @@ static const char* DropActivationsInActiveThreadImpl(
static const char* DropActivationsInActiveThread(
Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
MultipleFunctionTarget target(shared_info_array, result);
+ Isolate* isolate = shared_info_array->GetIsolate();
- const char* message = DropActivationsInActiveThreadImpl(
- shared_info_array->GetIsolate(), target, do_drop);
+ const char* message =
+ DropActivationsInActiveThreadImpl(isolate, target, do_drop);
if (message) {
return message;
}
- Isolate* isolate = shared_info_array->GetIsolate();
int array_len = GetArrayLength(shared_info_array);
// Replace "blocked on active" with "replaced on active" status.
@@ -2027,8 +2026,8 @@ class SingleFrameTarget {
const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
SingleFrameTarget target(frame);
- const char* result = DropActivationsInActiveThreadImpl(
- frame->isolate(), target, true);
+ const char* result =
+ DropActivationsInActiveThreadImpl(frame->isolate(), target, true);
if (result != NULL) {
return result;
}
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index ef285e6d68..3856f6062f 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -22,10 +22,10 @@ class Log {
void stop() { is_stopped_ = true; }
static bool InitLogAtStart() {
- return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc
- || FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp
- || FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_jit_prof
- || FLAG_log_internal_timer_events;
+ return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
+ FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp ||
+ FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_jit_prof ||
+ FLAG_log_internal_timer_events || FLAG_prof_cpp;
}
// Frees all resources acquired in Initialize and Open... functions.
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 3eede365b7..35cf0aa90b 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -271,7 +271,7 @@ PerfBasicLogger::PerfBasicLogger()
CHECK_NE(size, -1);
perf_output_handle_ =
base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
- CHECK_NE(perf_output_handle_, NULL);
+ CHECK_NOT_NULL(perf_output_handle_);
setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
}
@@ -402,6 +402,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "x32";
#elif V8_TARGET_ARCH_ARM
const char arch[] = "arm";
+#elif V8_TARGET_ARCH_PPC
+ const char arch[] = "ppc";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
#elif V8_TARGET_ARCH_X87
@@ -901,7 +903,7 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
void Logger::SharedLibraryEvent(const std::string& library_path,
uintptr_t start,
uintptr_t end) {
- if (!log_->IsEnabled() || !FLAG_prof) return;
+ if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
Log::MessageBuilder msg(log_);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR,
library_path.c_str(), start, end);
@@ -909,9 +911,10 @@ void Logger::SharedLibraryEvent(const std::string& library_path,
}
-void Logger::CodeDeoptEvent(Code* code) {
- if (!log_->IsEnabled()) return;
- DCHECK(FLAG_log_internal_timer_events);
+void Logger::CodeDeoptEvent(Code* code, int bailout_id, Address from,
+ int fp_to_sp_delta) {
+ PROFILER_LOG(CodeDeoptEvent(code, bailout_id, from, fp_to_sp_delta));
+ if (!log_->IsEnabled() || !FLAG_log_internal_timer_events) return;
Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
msg.Append("code-deopt,%ld,%d", since_epoch, code->CodeSize());
@@ -1514,7 +1517,7 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
void Logger::TickEvent(TickSample* sample, bool overflow) {
- if (!log_->IsEnabled() || !FLAG_prof) return;
+ if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
@@ -1859,9 +1862,9 @@ bool Logger::SetUp(Isolate* isolate) {
is_logging_ = true;
}
- if (FLAG_log_internal_timer_events || FLAG_prof) timer_.Start();
+ if (FLAG_log_internal_timer_events || FLAG_prof_cpp) timer_.Start();
- if (FLAG_prof) {
+ if (FLAG_prof_cpp) {
profiler_ = new Profiler(isolate);
is_logging_ = true;
profiler_->Engage();
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index abf35f0ebc..bb7ff32e7d 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -292,7 +292,8 @@ class Logger {
uintptr_t start,
uintptr_t end);
- void CodeDeoptEvent(Code* code);
+ void CodeDeoptEvent(Code* code, int bailout_id, Address from,
+ int fp_to_sp_delta);
void CurrentTimeEvent();
void TimerEvent(StartEnd se, const char* name);
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/lookup-inl.h
index 0c9cc91b2f..ffc02e7878 100644
--- a/deps/v8/src/lookup-inl.h
+++ b/deps/v8/src/lookup-inl.h
@@ -64,9 +64,9 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* map,
}
has_property_ = true;
switch (property_details_.kind()) {
- case v8::internal::DATA:
+ case v8::internal::kData:
return DATA;
- case v8::internal::ACCESSOR:
+ case v8::internal::kAccessor:
return ACCESSOR;
}
case ACCESSOR:
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 8088f4dc5d..672c026c3c 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -102,11 +102,13 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
DCHECK(HolderIsReceiverOrHiddenPrototype());
Handle<JSObject> holder = GetHolder<JSObject>();
if (holder_map_->is_dictionary_map()) {
- PropertyDetails details(attributes, FIELD, 0);
+ PropertyDetails details(attributes, v8::internal::DATA, 0);
JSObject::SetNormalizedProperty(holder, name(), value, details);
} else {
- holder_map_ = Map::ReconfigureDataProperty(holder_map_, descriptor_number(),
- attributes);
+ holder_map_ = Map::ReconfigureExistingProperty(
+ holder_map_, descriptor_number(), i::kData, attributes);
+ holder_map_ =
+ Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
JSObject::MigrateToMap(holder, holder_map_);
}
@@ -118,10 +120,9 @@ void LookupIterator::PrepareTransitionToDataProperty(
Handle<Object> value, PropertyAttributes attributes,
Object::StoreFromKeyed store_mode) {
if (state_ == TRANSITION) return;
- DCHECK(state_ != LookupIterator::ACCESSOR ||
- GetAccessors()->IsDeclaredAccessorInfo());
+ DCHECK(state_ != LookupIterator::ACCESSOR);
DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
-
+ DCHECK(!IsSpecialNumericIndex());
// Can only be called when the receiver is a JSObject. JSProxy has to be
// handled via a trap. Adding properties to primitive values is not
// observable.
@@ -132,9 +133,22 @@ void LookupIterator::PrepareTransitionToDataProperty(
return;
}
- transition_map_ = Map::TransitionToDataProperty(
+ auto transition = Map::TransitionToDataProperty(
handle(receiver->map(), isolate_), name_, value, attributes, store_mode);
state_ = TRANSITION;
+ transition_ = transition;
+
+ if (receiver->IsGlobalObject()) {
+ // Install a property cell.
+ InternalizeName();
+ auto cell = GlobalObject::EnsurePropertyCell(
+ Handle<GlobalObject>::cast(receiver), name());
+ DCHECK(cell->value()->IsTheHole());
+ transition_ = cell;
+ } else if (transition->GetBackPointer()->IsMap()) {
+ property_details_ = transition->GetLastDescriptorDetails();
+ has_property_ = true;
+ }
}
@@ -142,8 +156,9 @@ void LookupIterator::ApplyTransitionToDataProperty() {
DCHECK_EQ(TRANSITION, state_);
Handle<JSObject> receiver = GetStoreTarget();
+ if (receiver->IsGlobalObject()) return;
holder_ = receiver;
- holder_map_ = transition_map_;
+ holder_map_ = transition_map();
JSObject::MigrateToMap(receiver, holder_map_);
ReloadPropertyInformation();
}
@@ -174,7 +189,7 @@ void LookupIterator::TransitionToAccessorProperty(
}
// Install the accessor into the dictionary-mode object.
- PropertyDetails details(attributes, CALLBACKS, 0);
+ PropertyDetails details(attributes, ACCESSOR_CONSTANT, 0);
Handle<AccessorPair> pair;
if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
pair = Handle<AccessorPair>::cast(GetAccessors());
@@ -228,7 +243,7 @@ Handle<Object> LookupIterator::FetchValue() const {
if (holder_map_->IsGlobalObjectMap()) {
result = PropertyCell::cast(result)->value();
}
- } else if (property_details_.type() == v8::internal::FIELD) {
+ } else if (property_details_.type() == v8::internal::DATA) {
FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
@@ -239,10 +254,18 @@ Handle<Object> LookupIterator::FetchValue() const {
}
+int LookupIterator::GetAccessorIndex() const {
+ DCHECK(has_property_);
+ DCHECK(!holder_map_->is_dictionary_map());
+ DCHECK_EQ(v8::internal::ACCESSOR_CONSTANT, property_details_.type());
+ return descriptor_number();
+}
+
+
int LookupIterator::GetConstantIndex() const {
DCHECK(has_property_);
DCHECK(!holder_map_->is_dictionary_map());
- DCHECK_EQ(v8::internal::CONSTANT, property_details_.type());
+ DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
return descriptor_number();
}
@@ -250,7 +273,7 @@ int LookupIterator::GetConstantIndex() const {
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
DCHECK(!holder_map_->is_dictionary_map());
- DCHECK_EQ(v8::internal::FIELD, property_details_.type());
+ DCHECK_EQ(v8::internal::DATA, property_details_.type());
int index =
holder_map_->instance_descriptors()->GetFieldIndex(descriptor_number());
bool is_double = representation().IsDouble();
@@ -261,7 +284,7 @@ FieldIndex LookupIterator::GetFieldIndex() const {
Handle<HeapType> LookupIterator::GetFieldType() const {
DCHECK(has_property_);
DCHECK(!holder_map_->is_dictionary_map());
- DCHECK_EQ(v8::internal::FIELD, property_details_.type());
+ DCHECK_EQ(v8::internal::DATA, property_details_.type());
return handle(
holder_map_->instance_descriptors()->GetFieldType(descriptor_number()),
isolate_);
@@ -301,10 +324,10 @@ Handle<Object> LookupIterator::WriteDataValue(Handle<Object> value) {
} else {
property_dictionary->ValueAtPut(dictionary_entry(), *value);
}
- } else if (property_details_.type() == v8::internal::FIELD) {
+ } else if (property_details_.type() == v8::internal::DATA) {
holder->WriteToField(descriptor_number(), *value);
} else {
- DCHECK_EQ(v8::internal::CONSTANT, property_details_.type());
+ DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
}
return value;
}
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index a2e0d4d799..5a5466ebe4 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -46,7 +46,7 @@ class LookupIterator FINAL BASE_EMBEDDED {
Configuration configuration = PROTOTYPE_CHAIN)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- property_details_(NONE, FIELD, 0),
+ property_details_(NONE, v8::internal::DATA, 0),
isolate_(name->GetIsolate()),
name_(name),
receiver_(receiver),
@@ -61,7 +61,7 @@ class LookupIterator FINAL BASE_EMBEDDED {
Configuration configuration = PROTOTYPE_CHAIN)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- property_details_(NONE, FIELD, 0),
+ property_details_(NONE, v8::internal::DATA, 0),
isolate_(name->GetIsolate()),
name_(name),
holder_map_(holder->map(), isolate_),
@@ -88,7 +88,7 @@ class LookupIterator FINAL BASE_EMBEDDED {
bool is_dictionary_holder() const { return holder_map_->is_dictionary_map(); }
Handle<Map> transition_map() const {
DCHECK_EQ(TRANSITION, state_);
- return transition_map_;
+ return Handle<Map>::cast(transition_);
}
template <class T>
Handle<T> GetHolder() const {
@@ -107,13 +107,9 @@ class LookupIterator FINAL BASE_EMBEDDED {
PropertyAttributes attributes,
Object::StoreFromKeyed store_mode);
bool IsCacheableTransition() {
- bool cacheable =
- state_ == TRANSITION && transition_map()->GetBackPointer()->IsMap();
- if (cacheable) {
- property_details_ = transition_map_->GetLastDescriptorDetails();
- has_property_ = true;
- }
- return cacheable;
+ if (state_ != TRANSITION) return false;
+ return transition_->IsPropertyCell() ||
+ transition_map()->GetBackPointer()->IsMap();
}
void ApplyTransitionToDataProperty();
void ReconfigureDataProperty(Handle<Object> value,
@@ -132,8 +128,13 @@ class LookupIterator FINAL BASE_EMBEDDED {
}
FieldIndex GetFieldIndex() const;
Handle<HeapType> GetFieldType() const;
+ int GetAccessorIndex() const;
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
+ Handle<PropertyCell> GetTransitionPropertyCell() const {
+ DCHECK_EQ(TRANSITION, state_);
+ return Handle<PropertyCell>::cast(transition_);
+ }
Handle<Object> GetAccessors() const;
Handle<Object> GetDataValue() const;
// Usually returns the value that was passed in, but may perform
@@ -193,7 +194,7 @@ class LookupIterator FINAL BASE_EMBEDDED {
Isolate* isolate_;
Handle<Name> name_;
Handle<Map> holder_map_;
- Handle<Map> transition_map_;
+ Handle<Object> transition_;
Handle<Object> receiver_;
Handle<JSReceiver> holder_;
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 2501f806c9..166ac428b5 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -64,6 +64,13 @@ const int kInvalidProtoDepth = -1;
#include "src/arm/assembler-arm-inl.h"
#include "src/code.h" // NOLINT, must be after assembler_*.h
#include "src/arm/macro-assembler-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/constants-ppc.h"
+#include "src/assembler.h" // NOLINT
+#include "src/ppc/assembler-ppc.h" // NOLINT
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/code.h" // NOLINT, must be after assembler_*.h
+#include "src/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h"
#include "src/assembler.h" // NOLINT
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 1571144f9b..93a5563c19 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -39,22 +39,10 @@ const NEW_TWO_BYTE_STRING = false;
const GETTER = 0;
const SETTER = 1;
-# These definitions must match the index of the properties in objects.h.
-const kApiTagOffset = 0;
-const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 3;
-const kApiConstructorOffset = 3;
-const kApiPrototypeTemplateOffset = 5;
-const kApiParentTemplateOffset = 6;
-const kApiFlagOffset = 14;
-
const NO_HINT = 0;
const NUMBER_HINT = 1;
const STRING_HINT = 2;
-const kFunctionTag = 0;
-const kNewObjectTag = 1;
-
# For date.js.
const HoursPerDay = 24;
const MinutesPerHour = 60;
@@ -65,12 +53,6 @@ const msPerHour = 3600000;
const msPerDay = 86400000;
const msPerMonth = 2592000000;
-# For apinatives.js
-const kUninitialized = -1;
-const kReadOnlyPrototypeBit = 3;
-const kRemovePrototypeBit = 4; # For FunctionTemplateInfo, matches objects.h
-const kDoNotCacheBit = 5; # For FunctionTemplateInfo, matches objects.h
-
# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
const kInvalidDate = 'Invalid Date';
const kDayZeroInJulianDay = 2440588;
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index aec3469035..1dbc0fafc5 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -43,22 +43,24 @@ class SourceInfo;
class MessageLocation {
public:
- MessageLocation(Handle<Script> script,
- int start_pos,
- int end_pos)
+ MessageLocation(Handle<Script> script, int start_pos, int end_pos,
+ Handle<JSFunction> function = Handle<JSFunction>())
: script_(script),
start_pos_(start_pos),
- end_pos_(end_pos) { }
+ end_pos_(end_pos),
+ function_(function) {}
MessageLocation() : start_pos_(-1), end_pos_(-1) { }
Handle<Script> script() const { return script_; }
int start_pos() const { return start_pos_; }
int end_pos() const { return end_pos_; }
+ Handle<JSFunction> function() const { return function_; }
private:
Handle<Script> script_;
int start_pos_;
int end_pos_;
+ Handle<JSFunction> function_;
};
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 39887d6814..b49556c6a1 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -50,8 +50,6 @@ var kMessages = {
no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
- toMethod_non_function: ["Function.prototype.toMethod was called on ", "%0", ", which is a ", "%1", " and not a function"],
- toMethod_non_object: ["Function.prototype.toMethod: home object ", "%0", " is not an object"],
flags_getter_non_object: ["RegExp.prototype.flags getter called on non-object ", "%0"],
invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
@@ -155,9 +153,6 @@ var kMessages = {
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
strict_octal_literal: ["Octal literals are not allowed in strict mode."],
template_octal_literal: ["Octal literals are not allowed in template strings."],
- strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
- accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
- accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
strict_delete: ["Delete of an unqualified identifier in strict mode."],
strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
strict_const: ["Use of const in strict mode."],
@@ -166,10 +161,15 @@ var kMessages = {
strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
strict_caller: ["Illegal access to a strict mode caller function."],
+ strong_arguments: ["Please don't use 'arguments' in strong mode, use '...args' instead"],
+ strong_equal: ["Please don't use '==' or '!=' in strong mode, use '===' or '!==' instead"],
+ strong_delete: ["Please don't use 'delete' in strong mode, use maps or sets instead"],
+ strong_var: ["Please don't use 'var' in strong mode, use 'let' or 'const' instead"],
+ strong_for_in: ["Please don't use 'for'-'in' loops in strong mode, use 'for'-'of' instead"],
+ strong_empty: ["Please don't use empty sub-statements in strong mode, make them explicit with '{}' instead"],
+ sloppy_lexical: ["Block-scoped declarations (let, const, function, class) not yet supported outside strict mode"],
malformed_arrow_function_parameter_list: ["Malformed arrow function parameter list"],
generator_poison_pill: ["'caller' and 'arguments' properties may not be accessed on generator functions."],
- unprotected_let: ["Illegal let declaration in unprotected statement context."],
- unprotected_const: ["Illegal const declaration in unprotected statement context."],
cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
harmony_const_assign: ["Assignment to constant variable."],
@@ -177,14 +177,16 @@ var kMessages = {
symbol_to_primitive: ["Cannot convert a Symbol wrapper object to a primitive value"],
symbol_to_number: ["Cannot convert a Symbol value to a number"],
invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
- module_type_error: ["Module '", "%0", "' used improperly"],
module_export_undefined: ["Export '", "%0", "' is not defined in module"],
unexpected_super: ["'super' keyword unexpected here"],
extends_value_not_a_function: ["Class extends value ", "%0", " is not a function or null"],
prototype_parent_not_an_object: ["Class extends value does not have valid prototype property ", "%0"],
duplicate_constructor: ["A class may only have one constructor"],
- sloppy_lexical: ["Block-scoped declarations (let, const, function, class) not yet supported outside strict mode"],
- super_constructor_call: ["A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported."]
+ super_constructor_call: ["A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported."],
+ duplicate_proto: ["Duplicate __proto__ fields are not allowed in object literals"],
+ param_after_rest: ["Rest parameter must be last formal parameter"],
+ constructor_noncallable: ["Class constructors cannot be invoked without 'new'"],
+ array_not_subclassable: ["Subclassing Arrays is not currently supported."]
};
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 1cd9361e9a..1fdb3e97e5 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -126,10 +126,10 @@ void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
Assembler::JumpLabelToJumpRegister(pc_);
}
}
- if (IsInternalReference(rmode_)) {
+ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
- int count = Assembler::RelocateInternalReference(p, delta);
+ int count = Assembler::RelocateInternalReference(rmode_, p, delta);
CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index d6a4f793f4..e7cfd57006 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -197,7 +197,8 @@ Register ToRegister(int num) {
// Implementation of RelocInfo.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+ 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
bool RelocInfo::IsCodedSpecially() {
@@ -662,8 +663,18 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
-int Assembler::target_at(int32_t pos) {
+int Assembler::target_at(int32_t pos, bool is_internal) {
Instr instr = instr_at(pos);
+ if (is_internal) {
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ int32_t delta = instr_address - instr;
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
+ }
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
if (instr == 0) {
@@ -673,8 +684,6 @@ int Assembler::target_at(int32_t pos) {
return (imm18 + pos);
}
}
- // Check we have a branch or jump instruction.
- DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmectic shifts for signed integers.
if (IsBranch(instr)) {
@@ -702,7 +711,7 @@ int Assembler::target_at(int32_t pos) {
DCHECK(pos > delta);
return pos - delta;
}
- } else {
+ } else if (IsJ(instr)) {
int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
if (imm28 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
@@ -714,12 +723,22 @@ int Assembler::target_at(int32_t pos) {
DCHECK(pos > delta);
return pos - delta;
}
+ } else {
+ UNREACHABLE();
+ return 0;
}
}
-void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
+void Assembler::target_at_put(int32_t pos, int32_t target_pos,
+ bool is_internal) {
Instr instr = instr_at(pos);
+
+ if (is_internal) {
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ instr_at_put(pos, imm);
+ return;
+ }
if ((instr & ~kImm16Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
@@ -728,7 +747,6 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
return;
}
- DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
if (IsBranch(instr)) {
int32_t imm18 = target_pos - (pos + kBranchPCOffset);
DCHECK((imm18 & 3) == 0);
@@ -752,7 +770,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
instr_lui | ((imm & kHiMask) >> kLuiShift));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
- } else {
+ } else if (IsJ(instr)) {
uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
imm28 &= kImm28Mask;
DCHECK((imm28 & 3) == 0);
@@ -762,6 +780,8 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
DCHECK(is_uint26(imm26));
instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ } else {
+ UNREACHABLE();
}
}
@@ -782,7 +802,8 @@ void Assembler::print(Label* L) {
} else {
PrintF("%d\n", instr);
}
- next(&l);
+ next(&l, internal_reference_positions_.find(l.pos()) !=
+ internal_reference_positions_.end());
}
} else {
PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -793,6 +814,7 @@ void Assembler::print(Label* L) {
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
int32_t trampoline_pos = kInvalidSlotPos;
+ bool is_internal = false;
if (L->is_linked() && !trampoline_emitted_) {
unbound_labels_count_--;
next_buffer_check_ += kTrampolineSlotsSize;
@@ -801,23 +823,27 @@ void Assembler::bind_to(Label* L, int pos) {
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
int32_t dist = pos - fixup_pos;
- next(L); // Call next before overwriting link with target at fixup_pos.
+ is_internal = internal_reference_positions_.find(fixup_pos) !=
+ internal_reference_positions_.end();
+ next(L, is_internal); // Call next before overwriting link with target at
+ // fixup_pos.
Instr instr = instr_at(fixup_pos);
- if (IsBranch(instr)) {
+ if (is_internal) {
+ target_at_put(fixup_pos, pos, is_internal);
+ } else if (!is_internal && IsBranch(instr)) {
if (dist > kMaxBranchOffset) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry(fixup_pos);
CHECK(trampoline_pos != kInvalidSlotPos);
}
DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos);
+ target_at_put(fixup_pos, trampoline_pos, false);
fixup_pos = trampoline_pos;
dist = pos - fixup_pos;
}
- target_at_put(fixup_pos, pos);
+ target_at_put(fixup_pos, pos, false);
} else {
- DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
- target_at_put(fixup_pos, pos);
+ target_at_put(fixup_pos, pos, false);
}
}
L->bind_to(pos);
@@ -835,9 +861,9 @@ void Assembler::bind(Label* L) {
}
-void Assembler::next(Label* L) {
+void Assembler::next(Label* L, bool is_internal) {
DCHECK(L->is_linked());
- int link = target_at(L->pos());
+ int link = target_at(L->pos(), is_internal);
if (link == kEndOfChain) {
L->Unuse();
} else {
@@ -2325,66 +2351,58 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
+ intptr_t pc_delta) {
Instr instr = instr_at(pc);
- DCHECK(IsJ(instr) || IsLui(instr));
- if (IsLui(instr)) {
- Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
- DCHECK(IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
- if (imm == kEndOfJumpChain) {
- return 0; // Number of instructions patched.
- }
- imm += pc_delta;
- DCHECK((imm & 3) == 0);
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
-
- instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> kLuiShift) & kImm16Mask));
- instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
- return 2; // Number of instructions patched.
- } else {
- uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+ if (RelocInfo::IsInternalReference(rmode)) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc);
+ if (*p == 0) {
return 0; // Number of instructions patched.
}
- imm28 += pc_delta;
- imm28 &= kImm28Mask;
- DCHECK((imm28 & 3) == 0);
+ *p += pc_delta;
+ return 1; // Number of instructions patched.
+ } else {
+ DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
+ if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+ DCHECK(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm += pc_delta;
+ DCHECK((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ return 2; // Number of instructions patched.
+ } else if (IsJ(instr)) {
+ uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm28 += pc_delta;
+ imm28 &= kImm28Mask;
+ DCHECK((imm28 & 3) == 0);
- instr &= ~kImm26Mask;
- uint32_t imm26 = imm28 >> 2;
- DCHECK(is_uint26(imm26));
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ DCHECK(is_uint26(imm26));
- instr_at_put(pc, instr | (imm26 & kImm26Mask));
- return 1; // Number of instructions patched.
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
+ } else {
+ UNREACHABLE();
+ return 0;
+ }
}
}
@@ -2425,12 +2443,12 @@ void Assembler::GrowBuffer() {
// Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
+ rmode == RelocInfo::INTERNAL_REFERENCE) {
byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
- RelocateInternalReference(p, pc_delta);
+ RelocateInternalReference(rmode, p, pc_delta);
}
}
-
DCHECK(!overflow());
}
@@ -2449,6 +2467,21 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dd(Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ if (label->is_bound()) {
+ uint32_t data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+ } else {
+ uint32_t target_pos = jump_address(label);
+ emit(target_pos);
+ internal_reference_positions_.insert(label->pos());
+ }
+}
+
+
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) =
@@ -2529,7 +2562,7 @@ void Assembler::CheckTrampolinePool() {
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and available
// to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
}
@@ -2575,7 +2608,7 @@ Address Assembler::target_address_at(Address pc) {
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
- HeapNumber::cast(object)->set_value(base::OS::nan_value());
+ HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index c6b12b76d7..89af82ad1a 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -38,6 +38,8 @@
#include <stdio.h>
+#include <set>
+
#include "src/assembler.h"
#include "src/mips/constants-mips.h"
#include "src/serialize.h"
@@ -1016,12 +1018,19 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg);
- static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
+
+
+ static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
+ intptr_t pc_delta);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dd(Label* label);
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
@@ -1120,10 +1129,10 @@ class Assembler : public AssemblerBase {
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
- int target_at(int32_t pos);
+ int target_at(int32_t pos, bool is_internal);
// Patch branch instruction at pos to branch to given branch target pos.
- void target_at_put(int32_t pos, int32_t target_pos);
+ void target_at_put(int32_t pos, int32_t target_pos, bool is_internal);
// Say if we need to relocate with this mode.
bool MustUseReg(RelocInfo::Mode rmode);
@@ -1292,7 +1301,7 @@ class Assembler : public AssemblerBase {
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
- void next(Label* L);
+ void next(Label* L, bool is_internal);
// One trampoline consists of:
// - space for trampoline slots,
@@ -1357,6 +1366,10 @@ class Assembler : public AssemblerBase {
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
static const int kInvalidSlotPos = -1;
+ // Internal reference positions, required for unbounded internal reference
+ // labels.
+ std::set<int> internal_reference_positions_;
+
Trampoline trampoline_;
bool internal_trampoline_exception_;
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 100195b58d..42a0bbe58b 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -138,6 +138,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
+ __ mov(a3, a1);
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
@@ -315,6 +316,36 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+ __ push(a2);
+ }
+
+ __ push(a1); // argument for Runtime_NewObject
+ __ push(original_constructor); // original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(t4, v0);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ if (create_memento) {
+ __ jmp(count_incremented);
+ } else {
+ __ jmp(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -322,6 +353,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a2 : allocation site or undefined
+ // -- a3 : original constructor
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -343,7 +375,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
- __ AssertUndefinedOrAllocationSite(a2, a3);
+ __ AssertUndefinedOrAllocationSite(a2, t0);
__ push(a2);
}
@@ -351,7 +383,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sll(a0, a0, kSmiTagSize); // Tag arguments count.
__ MultiPushReversed(a0.bit() | a1.bit());
- Label rt_call, allocated;
+ Label rt_call, allocated, normal_new, count_incremented;
+ __ Branch(&normal_new, eq, a1, Operand(a3));
+
+ // Original constructor and function are different.
+ Generate_Runtime_NewObject(masm, create_memento, a3, &count_incremented,
+ &allocated);
+ __ bind(&normal_new);
+
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
if (FLAG_inline_new) {
@@ -590,27 +629,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// a1: constructor function
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ lw(a2, MemOperand(sp, 2 * kPointerSize));
- __ push(a2);
- }
-
- __ push(a1); // Argument for Runtime_NewObject.
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ mov(t4, v0);
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
+ Generate_Runtime_NewObject(masm, create_memento, a1, &count_incremented,
+ &allocated);
// Receiver for constructor call allocated.
// t4: JSObject
@@ -739,6 +759,93 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a2 : allocation site or undefined
+ // -- a3 : original constructor
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ __ mov(t0, a0);
+ __ SmiTag(t0);
+ __ push(t0); // Smi-tagged arguments count.
+
+ // Push new.target.
+ __ push(a3);
+
+ // receiver is the hole.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ push(at);
+
+ // Set up pointer to last argument.
+ __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // t0: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ sll(at, t0, kPointerSizeLog2 - 1);
+ __ Addu(at, a2, Operand(at));
+ __ lw(at, MemOperand(at));
+ __ push(at);
+ __ bind(&entry);
+ __ Subu(t0, t0, Operand(2));
+ __ Branch(&loop, ge, t0, Operand(zero_reg));
+
+ __ Addu(a0, a0, Operand(1));
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ li(a2, Operand(debug_step_in_fp));
+ __ lw(a2, MemOperand(a2));
+ __ Branch(&skip_step_in, eq, a2, Operand(zero_reg));
+
+ __ Push(a0, a1, a1);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ Pop(a0, a1);
+
+ __ bind(&skip_step_in);
+
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ // v0: result
+ // sp[0]: number of arguments (smi-tagged)
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ lw(a1, MemOperand(sp, 0));
+
+ // Leave construct frame.
+ }
+
+ __ sll(at, a1, kPointerSizeLog2 - 1);
+ __ Addu(sp, sp, Operand(at));
+ __ Addu(sp, sp, Operand(kPointerSize));
+ __ Jump(ra);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 97eed74707..c4fc383552 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -588,7 +588,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ And(t2, lhs, Operand(rhs));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1001,6 +1001,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
@@ -1465,7 +1466,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Branch(&loop);
__ bind(&is_instance);
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
if (!HasCallSiteInlineCheck()) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
@@ -1480,7 +1481,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ PatchRelocatedValue(inline_site, scratch, v0);
if (!ReturnTrueFalseObject()) {
- DCHECK_EQ(Smi::FromInt(0), 0);
__ mov(v0, zero_reg);
}
}
@@ -1585,6 +1585,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1643,6 +1644,9 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
+
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1674,6 +1678,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// t2 : allocated object (tagged)
// t5 : mapped parameter count (tagged)
+ CHECK(!has_new_target());
+
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
// a1 = parameter count (tagged)
@@ -1716,7 +1722,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
__ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
__ sll(t5, a1, 1);
@@ -1931,6 +1937,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ if (has_new_target()) {
+ // Subtract 1 from smi-tagged arguments count.
+ __ Subu(a1, a1, Operand(2));
+ }
__ sw(a1, MemOperand(sp, 0));
__ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, a2, Operand(at));
@@ -2012,6 +2022,33 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // sp[0] : index of rest parameter
+ // sp[4] : number of parameters
+ // sp[8] : receiver displacement
+ // Check if the calling frame is an arguments adaptor frame.
+
+ Label runtime;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer.
+ __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, a2, Operand(at));
+
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 2 * kPointerSize));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2704,6 +2741,17 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, t1);
}
+ // Pass function as original constructor.
+ if (IsSuperConstructorCall()) {
+ __ li(t0, Operand(1 * kPointerSize));
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Addu(t0, t0, Operand(at));
+ __ Addu(at, sp, Operand(t0));
+ __ lw(a3, MemOperand(at, 0));
+ } else {
+ __ mov(a3, a1);
+ }
+
// Jump to the function-specific construct stub.
Register jmp_reg = t0;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -2743,10 +2791,9 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id
+ // a2 - vector
Label miss;
- EmitLoadTypeFeedbackVector(masm, a2);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(&miss, ne, a1, Operand(at));
@@ -2761,6 +2808,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ Branch(&miss, ne, t1, Operand(at));
__ mov(a2, t0);
+ __ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -2781,6 +2829,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
+ // a2 - vector
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
@@ -2791,13 +2840,31 @@ void CallICStub::Generate(MacroAssembler* masm) {
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, a2);
-
// The checks. First, does r1 match the recorded monomorphic target?
__ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t0, a2, Operand(t0));
__ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
- __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
+ __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));
+
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(a1, &extra_checks_or_miss);
__ bind(&have_js_function);
if (CallAsMethod()) {
@@ -2874,16 +2941,18 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
__ sw(t0, FieldMemOperand(a2, with_types_offset));
- // Store the function.
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sw(a1, MemOperand(t0, 0));
+ // Store the function. Use a stub since we need a frame for allocation.
+ // a2 - vector
+ // a3 - slot
+ // a1 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(a1);
+ __ CallStub(&create_stub);
+ __ Pop(a1);
+ }
- // Update the write barrier.
- __ mov(t1, a1);
- __ RecordWrite(a2, t0, t1, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Branch(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
@@ -2905,26 +2974,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ lw(t0, MemOperand(sp, (arg_count() + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
- __ Push(t0, a1, a2, a3);
+ // Push the receiver and the function and feedback info.
+ __ Push(a1, a2, a3);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to a1 and exit the internal frame.
- __ mov(a1, v0);
- }
+ // Move result to a1 and exit the internal frame.
+ __ mov(a1, v0);
}
@@ -3856,12 +3919,14 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
+ __ GetWeakValue(t0, cell);
__ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Branch(&miss, ne, a2, Operand(known_map_));
- __ Branch(&miss, ne, a3, Operand(known_map_));
+ __ Branch(&miss, ne, a2, Operand(t0));
+ __ Branch(&miss, ne, a3, Operand(t0));
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
@@ -3951,7 +4016,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ sll(at, index, 1);
__ Addu(index, index, at);
@@ -4476,6 +4541,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallICStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4718,11 +4797,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- a0 : argc (only if argument_count() == ANY)
+ // -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- a1 : constructor
// -- a2 : AllocationSite or undefined
- // -- sp[0] : return address
- // -- sp[4] : last argument
+ // -- a3 : Original constructor
+ // -- sp[0] : last argument
// -----------------------------------
if (FLAG_debug_code) {
@@ -4743,6 +4822,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
+ Label subclassing;
+ __ Branch(&subclassing, ne, a1, Operand(a3));
+
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -4756,6 +4838,29 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ // Subclassing.
+ __ bind(&subclassing);
+ __ Push(a1);
+ __ Push(a3);
+
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ li(at, Operand(2));
+ __ addu(a0, a0, at);
+ break;
+ case NONE:
+ __ li(a0, Operand(2));
+ break;
+ case ONE:
+ __ li(a0, Operand(3));
+ break;
+ }
+
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
@@ -4833,12 +4938,154 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(
+ MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
+ MemOperand return_value_operand, MemOperand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address.is(a1) || function_address.is(a2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ lb(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, Operand(thunk_ref));
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ __ mov(t9, function_address);
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ __ li(s3, Operand(next_address));
+ __ lw(s0, MemOperand(s3, kNextOffset));
+ __ lw(s1, MemOperand(s3, kLimitOffset));
+ __ lw(s2, MemOperand(s3, kLevelOffset));
+ __ Addu(s2, s2, Operand(1));
+ __ sw(s2, MemOperand(s3, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, t9);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ lw(v0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ sw(s0, MemOperand(s3, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ lw(a1, MemOperand(s3, kLevelOffset));
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ }
+ __ Subu(s2, s2, Operand(1));
+ __ sw(s2, MemOperand(s3, kLevelOffset));
+ __ lw(at, MemOperand(s3, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+ // Check if the function scheduled an exception.
+ __ bind(&leave_exit_frame);
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ lw(t1, MemOperand(at));
+ __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ __ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ lw(cp, *context_restore_operand);
+ }
+ if (stack_space_offset != kInvalidStackOffset) {
+ // ExitFrame contains four MIPS argument slots after DirectCEntryStub call
+ // so this must be accounted for.
+ __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
+ } else {
+ __ li(s0, Operand(stack_space));
+ }
+ __ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN,
+ stack_space_offset != kInvalidStackOffset);
+
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ sw(s1, MemOperand(s3, kLimitOffset));
+ __ mov(s0, v0);
+ __ mov(a0, v0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ mov(v0, s0);
+ __ jmp(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- t0 : call_data
// -- a2 : holder
// -- a1 : api_function_address
+ // -- a3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -4853,10 +5100,6 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = a1;
Register context = cp;
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
-
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
@@ -4868,6 +5111,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || a3.is(argc.reg()));
+
// Save context, callee and call data.
__ Push(context, callee, call_data);
// Load context from callee.
@@ -4879,8 +5124,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
}
// Push return value and default return value.
__ Push(scratch, scratch);
- __ li(scratch,
- Operand(ExternalReference::isolate_address(isolate())));
+ __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
// Push isolate and holder.
__ Push(scratch, holder);
@@ -4900,36 +5144,69 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ Addu(a0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ sw(scratch, MemOperand(a0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ sw(at, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(at, Operand(argc));
- __ sw(at, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ if (argc.is_immediate()) {
+ // FunctionCallbackInfo::values_
+ __ Addu(at, scratch,
+ Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
+ __ sw(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc.immediate()));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
+ } else {
+ // FunctionCallbackInfo::values_
+ __ sll(at, argc.reg(), kPointerSizeLog2);
+ __ Addu(at, at, scratch);
+ __ Addu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ __ sw(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_
+ __ Addu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
+ __ sll(at, argc.reg(), kPointerSizeLog2);
+ __ sw(at, MemOperand(a0, 3 * kPointerSize));
+ }
+
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument.
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ int32_t stack_space_offset = 4 * kPointerSize;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_offset = kInvalidStackOffset;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_offset, return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
+ call_data_undefined);
+}
+
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- return_value_operand,
- &context_restore_operand);
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -4960,11 +5237,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kInvalidStackOffset,
+ MemOperand(fp, 6 * kPointerSize), NULL);
}
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index fbd4044316..a04b8cb0e7 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -750,7 +750,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Addu(scratch1, array, Operand(kHeapObjectTag));
- __ sw(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
scratch1,
@@ -771,15 +771,16 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Repurpose registers no longer in use.
Register hole_lower = elements;
Register hole_upper = length;
-
__ li(hole_lower, Operand(kHoleNanLower32));
+ __ li(hole_upper, Operand(kHoleNanUpper32));
+
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch3: begin of FixedDoubleArray element fields, not tagged
- __ Branch(USE_DELAY_SLOT, &entry);
- __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot.
+
+ __ Branch(&entry);
__ bind(&only_change_map);
__ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -826,9 +827,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
// exponent
__ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
- __ bind(&entry);
__ addiu(scratch3, scratch3, kDoubleSize);
+ __ bind(&entry);
__ Branch(&loop, lt, scratch3, Operand(array_end));
__ bind(&done);
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 668481e9ba..22c3e6c18b 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -924,6 +924,7 @@ class Instruction {
// C/C++ argument slots size.
const int kCArgSlotCount = 4;
const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
+const int kInvalidStackOffset = -1;
// JS argument slots size.
const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
// Assembly builtins argument slots size.
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index e39b368978..3dfc64a605 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -162,6 +162,9 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
+ __ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ sw(fp, MemOperand(a2));
+
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index c1a8291480..1f12011803 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -134,7 +134,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ lw(at, MemOperand(sp, receiver_offset));
@@ -161,7 +161,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -206,7 +206,7 @@ void FullCodeGenerator::Generate() {
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(a1);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -249,6 +249,26 @@ void FullCodeGenerator::Generate() {
}
}
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Addu(a3, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(a2, Operand(Smi::FromInt(num_parameters)));
+ __ li(a1, Operand(Smi::FromInt(rest_index)));
+ __ Push(a3, a2, a1);
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, v0, a1, a2);
+ }
+
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -271,15 +291,19 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, v0, a1, a2);
@@ -439,7 +463,11 @@ void FullCodeGenerator::EmitReturnSequence() {
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int32_t sp_delta = arg_count * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -922,15 +950,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(a1, scope_->ContextChainLength(scope_->ScriptScope()));
- __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
+ __ lw(a1, ContextOperand(a1, descriptor->Index()));
__ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
// Assign it.
@@ -1229,7 +1258,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// For proxies, no filtering is done.
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK_EQ(Smi::FromInt(0), 0);
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ Branch(&update_each, eq, a2, Operand(zero_reg));
// Convert the entry to a string or (smi) 0 if it isn't a property
@@ -1247,6 +1276,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1286,7 +1316,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1520,6 +1550,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1671,11 +1706,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ push(v0); // Save result on stack.
@@ -1731,17 +1768,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ lw(a0, MemOperand(sp));
__ push(a0);
VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- __ Drop(2);
- }
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1763,6 +1801,69 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(v0); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ lw(a0, MemOperand(sp)); // Duplicate receiver.
+ __ push(a0);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ lw(a0, MemOperand(sp));
@@ -1818,6 +1919,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1957,19 +2059,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(v0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
- mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -2351,7 +2449,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left_expr,
Expression* right_expr) {
Label done, smi_case, stub_call;
@@ -2372,7 +2469,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2456,9 +2553,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ lw(scratch, MemOperand(sp, kPointerSize)); // constructor
@@ -2466,24 +2561,29 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ lw(scratch, MemOperand(sp, 0)); // prototype
}
__ push(scratch);
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
default:
@@ -2499,12 +2599,10 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
__ pop(a1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2579,7 +2677,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2646,8 +2744,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(strict_mode())));
- __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ li(a0, Operand(Smi::FromInt(language_mode())));
+ __ Push(v0, cp, a1, a0); // Value, context, name, language mode.
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
@@ -2662,7 +2760,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
@@ -2697,8 +2795,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(v0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2710,9 +2808,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(v0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2730,7 +2829,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2753,8 +2853,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(v0);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2769,8 +2867,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(v0);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(v0);
}
@@ -2920,9 +3019,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2950,8 +3048,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
- // t0: the strict mode.
- __ li(t0, Operand(Smi::FromInt(strict_mode())));
+ // t0: the language mode.
+ __ li(t0, Operand(Smi::FromInt(language_mode())));
// a1: the start position of the scope the calls resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
@@ -2963,8 +3061,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(a0);
__ CallRuntime(Runtime::kGetPrototype, 1);
@@ -3085,11 +3182,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ Push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3118,12 +3211,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ Push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3157,6 +3246,66 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ if (!ValidateSuperCall(expr)) return;
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into a1 and a0.
+ __ li(a0, Operand(arg_count));
+ __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ RecordJSReturnSite(expr);
+
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(a1, this_var);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ Branch(&uninitialized_this, eq, a1, Operand(at));
+ __ li(a0, Operand(this_var->name()));
+ __ Push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3689,7 +3838,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -4036,6 +4185,64 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ Push(result_register());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ mov(a0, zero_reg);
+ __ Branch(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(a1, a1);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ Addu(a1, a1, Operand(-1));
+ __ mov(a0, a1);
+
+ // Get arguments pointer in a2.
+ __ sll(at, a1, kPointerSizeLog2);
+ __ addu(a2, a2, at);
+ __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset));
+ Label loop;
+ __ bind(&loop);
+ // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ lw(a3, MemOperand(a2));
+ __ Push(a3);
+ __ Addu(a1, a1, Operand(-1));
+ __ Branch(&loop, ne, a1, Operand(zero_reg));
+ }
+
+ __ bind(&args_set_up);
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Addu(at, at, Operand(sp));
+ __ lw(a1, MemOperand(at, 0));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(result_register());
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
@@ -4055,7 +4262,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4456,7 +4663,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ li(a1, Operand(Smi::FromInt(strict_mode())));
+ __ li(a1, Operand(Smi::FromInt(language_mode())));
__ push(a1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4464,7 +4671,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
@@ -4681,6 +4888,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4715,8 +4923,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4786,7 +4993,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index ecdaecf2b7..b8cae81e0d 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -98,7 +98,19 @@ void FastCloneShallowObjectDescriptor::Initialize(
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, a2, a3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a3, a1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -125,6 +137,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a3, a2};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// a0 : number of arguments
// a1 : the function to call
@@ -299,6 +321,27 @@ void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
t0, // call_data
a2, // holder
a1, // api_function_address
+ a3, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a0, // callee
+ t0, // call_data
+ a2, // holder
+ a1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index cdc68c8652..0dea629d3a 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -85,7 +85,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
@@ -143,8 +142,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -339,7 +337,7 @@ bool LCodeGen::GenerateJumpTable() {
DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
@@ -815,9 +813,9 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
- const char* detail, Register src1,
- const Operand& src2) {
+ Register src1, const Operand& src2) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
@@ -858,21 +856,22 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ bind(&skip);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -882,12 +881,12 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, Register src1,
- const Operand& src2) {
+ Deoptimizer::DeoptReason deopt_reason,
+ Register src1, const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
+ DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}
@@ -909,6 +908,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1117,7 +1117,8 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ subu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ subu(dividend, zero_reg, dividend);
@@ -1149,7 +1150,8 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
@@ -1168,7 +1170,8 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
+ Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
@@ -1177,7 +1180,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@@ -1189,7 +1192,8 @@ void LCodeGen::DoModI(LModI* instr) {
// If we care about -0, test if the dividend is <0 and the result is 0.
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
+ Operand(zero_reg));
}
__ bind(&done);
}
@@ -1205,18 +1209,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1253,7 +1258,8 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1262,7 +1268,8 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Mul(scratch0(), result, Operand(divisor));
__ Subu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
+ Operand(zero_reg));
}
}
@@ -1281,14 +1288,16 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1297,12 +1306,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
+ Operand(zero_reg));
}
}
@@ -1348,14 +1358,15 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Subu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
}
// Dividing by -1 is basically negation, unless we overflow.
__ Xor(scratch, scratch, result);
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(ge, instr, "overflow", scratch, Operand(zero_reg));
+ DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+ Operand(zero_reg));
}
return;
}
@@ -1390,7 +1401,8 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1434,14 +1446,16 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1450,7 +1464,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1481,14 +1495,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+ Operand(zero_reg));
} else {
__ Subu(result, zero_reg, left);
}
@@ -1497,7 +1512,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+ Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
@@ -1549,7 +1565,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
- DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1564,7 +1580,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+ Operand(zero_reg));
__ bind(&done);
}
}
@@ -1628,7 +1645,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
- DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
+ Operand(zero_reg));
}
break;
case Token::SHL:
@@ -1663,7 +1681,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
+ Operand(zero_reg));
}
__ Move(result, left);
}
@@ -1678,7 +1697,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+ Operand(zero_reg));
} else {
__ sll(result, left, shift_count);
}
@@ -1726,7 +1746,8 @@ void LCodeGen::DoSubI(LSubI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+ Operand(zero_reg));
}
}
@@ -1744,6 +1765,20 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
DCHECK(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
+#if V8_HOST_ARCH_IA32
+ // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
+ // builds.
+ uint64_t bits = instr->bits();
+ if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
+ V8_UINT64_C(0x7FF0000000000000)) {
+ uint32_t lo = static_cast<uint32_t>(bits);
+ uint32_t hi = static_cast<uint32_t>(bits >> 32);
+ __ li(at, Operand(lo));
+ __ li(scratch0(), Operand(hi));
+ __ Move(result, at, scratch0());
+ return;
+ }
+#endif
double v = instr->value();
__ Move(result, v);
}
@@ -1780,9 +1815,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch,
+ Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1917,7 +1953,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+ Operand(zero_reg));
}
}
@@ -2030,8 +2067,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
@@ -2178,7 +2214,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
@@ -2234,7 +2270,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, "unexpected object", zero_reg,
+ DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
Operand(zero_reg));
}
}
@@ -2880,7 +2916,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", result, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
}
}
@@ -2935,7 +2971,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at));
}
// Store the value.
@@ -2954,7 +2990,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole", result, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@@ -2978,7 +3014,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@@ -3058,7 +3094,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", result, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@@ -3194,8 +3230,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
- Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
+ result, Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
@@ -3248,7 +3284,8 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
- DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
+ Operand(kHoleNanUpper32));
}
}
@@ -3284,10 +3321,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
+ Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
}
}
}
@@ -3433,10 +3471,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
+ DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
@@ -3472,7 +3510,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr, "too many arguments", length,
+ DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
@@ -3553,24 +3591,19 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- A1State a1_state) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
+ Register function_reg = a1;
LPointerMap* pointers = instr->pointer_map();
if (can_invoke_directly) {
- if (a1_state == A1_UNINITIALIZED) {
- __ li(a1, function);
- }
-
// Change context.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
@@ -3579,7 +3612,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
__ Call(at);
// Set up deoptimization.
@@ -3588,7 +3621,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
@@ -3603,7 +3636,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
Label done;
Register exponent = scratch0();
@@ -3670,7 +3703,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
__ bind(&done);
}
@@ -3725,7 +3758,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3734,7 +3767,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
__ bind(&done);
}
}
@@ -3767,7 +3801,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr, "overflow", scratch,
+ DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@@ -3782,7 +3816,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@@ -3801,7 +3835,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3810,7 +3844,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&check_sign_on_zero);
__ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
+ Operand(zero_reg));
}
__ bind(&done);
}
@@ -3876,7 +3911,7 @@ void LCodeGen::DoPower(LPower* instr) {
DCHECK(!t3.is(tagged_exponent));
__ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number", t3, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3935,9 +3970,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- A1_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
@@ -4046,8 +4079,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(a3));
+ DCHECK(vector_register.is(a2));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ li(vector_register, vector);
+ __ li(slot_register, Operand(Smi::FromInt(index)));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4225,7 +4280,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4248,7 +4303,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
}
}
@@ -4457,7 +4512,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4842,12 +4897,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@@ -4863,7 +4918,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
@@ -4888,7 +4943,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
+ Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -4896,7 +4952,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
__ Mfhc1(scratch, result_reg);
- DeoptimizeIf(eq, instr, "minus zero", scratch,
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
@@ -4904,7 +4960,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4969,12 +5025,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
- Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
+ scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
+ Operand(at));
// Load the double value.
__ ldc1(double_scratch,
@@ -4989,7 +5046,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -4997,7 +5054,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
}
}
__ bind(&done);
@@ -5073,7 +5131,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -5081,7 +5139,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
__ bind(&done);
}
}
@@ -5107,7 +5166,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -5115,19 +5174,20 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
__ bind(&done);
}
}
__ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
- DeoptimizeIf(lt, instr, "overflow", scratch1, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
}
@@ -5135,7 +5195,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}
}
@@ -5153,12 +5213,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(first));
} else {
- DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
+ DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
+ DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(last));
}
}
} else {
@@ -5169,11 +5232,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
- Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
+ at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(tag));
}
}
}
@@ -5188,9 +5252,9 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
}
}
@@ -5206,7 +5270,8 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
+ Operand(zero_reg));
}
@@ -5260,7 +5325,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
}
__ bind(&success);
@@ -5298,7 +5363,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@@ -5513,7 +5578,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -5721,7 +5786,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
type = Deoptimizer::LAZY;
}
- DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
Operand(zero_reg));
}
@@ -5812,18 +5877,18 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kUndefined, object, Operand(at));
Register null_value = t1;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
+ DeoptimizeIf(eq, instr, Deoptimizer::kNull, object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr, "not a JavaScript object", a1,
+ DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
@@ -5841,7 +5906,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
__ bind(&use_cache);
}
@@ -5861,7 +5926,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
__ bind(&done);
}
@@ -5871,7 +5936,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 43316e471b..9ba214993c 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -143,7 +143,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
@@ -210,18 +210,11 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
- enum A1State {
- A1_UNINITIALIZED,
- A1_CONTAINS_TARGET
- };
-
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- A1State a1_state);
+ int formal_parameter_count, int arity,
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -229,12 +222,14 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::BailoutType bailout_type, const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail = NULL, Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(
+ Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
+ Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 77dea5b869..8d1b45fa30 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -262,6 +262,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -1251,7 +1265,15 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(a3);
+ vector = FixedTemp(a2);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, v0), instr);
}
@@ -1500,9 +1522,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ int32_t constant_value = 0;
if (right->IsConstant()) {
HConstant* constant = HConstant::cast(right);
- int32_t constant_value = constant->Integer32Value();
+ constant_value = constant->Integer32Value();
// Constants -1, 0 and 1 can be optimized if the result can overflow.
// For other constants, it can be optimized only without overflow.
if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
@@ -1525,7 +1548,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
right_op = UseRegister(right);
}
LMulI* mul = new(zone()) LMulI(left_op, right_op);
- if (can_overflow || bailout_on_minus_zero) {
+ if (right_op->IsConstantOperand()
+ ? ((can_overflow && constant_value == -1) ||
+ (bailout_on_minus_zero && constant_value <= 0))
+ : (can_overflow || bailout_on_minus_zero)) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index ecffef7ef3..1ccba14bc8 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -1323,6 +1323,7 @@ class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
DECLARE_HYDROGEN_ACCESSOR(Constant)
double value() const { return hydrogen()->DoubleValue(); }
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
};
@@ -1901,20 +1902,26 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
int arity() const { return hydrogen()->argument_count() - 1; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2187,7 +2194,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2244,7 +2251,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 90c3499a9a..972530ee35 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -596,7 +596,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
Branch(miss, ne, at, Operand(zero_reg));
@@ -3087,7 +3087,7 @@ void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
j(imm28);
}
// Emit a nop in the branch delay slot if required.
@@ -3104,7 +3104,7 @@ void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
}
@@ -3124,7 +3124,7 @@ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
}
@@ -3243,7 +3243,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- DCHECK_EQ(Smi::FromInt(0), 0);
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
// The second zero_reg indicates no context.
// The first zero_reg is the NULL frame pointer.
// The operands are reversed to match the order of MultiPush/Pop.
@@ -3917,7 +3917,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
- li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
@@ -4413,135 +4413,6 @@ void MacroAssembler::TailCallStub(CodeStub* stub,
}
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- DCHECK(function_address.is(a1) || function_address.is(a2));
-
- Label profiler_disabled;
- Label end_profiler_check;
- li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
- lb(t9, MemOperand(t9, 0));
- Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- li(t9, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- mov(t9, function_address);
- bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- li(s3, Operand(next_address));
- lw(s0, MemOperand(s3, kNextOffset));
- lw(s1, MemOperand(s3, kLimitOffset));
- lw(s2, MemOperand(s3, kLevelOffset));
- Addu(s2, s2, Operand(1));
- sw(s2, MemOperand(s3, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, a0);
- li(a0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(this, t9);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, a0);
- li(a0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load value from ReturnValue.
- lw(v0, return_value_operand);
- bind(&return_value_loaded);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- sw(s0, MemOperand(s3, kNextOffset));
- if (emit_debug_code()) {
- lw(a1, MemOperand(s3, kLevelOffset));
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
- }
- Subu(s2, s2, Operand(1));
- sw(s2, MemOperand(s3, kLevelOffset));
- lw(at, MemOperand(s3, kLimitOffset));
- Branch(&delete_allocated_handles, ne, s1, Operand(at));
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
- lw(t1, MemOperand(at));
- Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
- bind(&exception_handled);
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- lw(cp, *context_restore_operand);
- }
- li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- sw(s1, MemOperand(s3, kLimitOffset));
- mov(s0, v0);
- mov(a0, v0);
- PrepareCallCFunction(1, s1);
- li(a0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
- 1);
- mov(v0, s0);
- jmp(&leave_exit_frame);
-}
-
-
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -5166,10 +5037,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count,
- bool restore_context,
- bool do_return) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context, bool do_return,
+ bool argument_count_is_length) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -5200,8 +5070,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
if (argument_count.is_valid()) {
- sll(t8, argument_count, kPointerSizeLog2);
- addu(sp, sp, t8);
+ if (argument_count_is_length) {
+ addu(sp, sp, argument_count);
+ } else {
+ sll(t8, argument_count, kPointerSizeLog2);
+ addu(sp, sp, t8);
+ }
}
if (do_return) {
@@ -6010,6 +5884,19 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ lw(dst,
+ FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+ : AccessorPair::kSetterOffset;
+ lw(dst, FieldMemOperand(dst, offset));
+}
+
+
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Register empty_fixed_array_value = t2;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 0bc1e15aa8..02845e2bbd 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -871,10 +871,9 @@ class MacroAssembler: public Assembler {
int stack_space = 0);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles,
- Register arg_count,
- bool restore_context,
- bool do_return = NO_EMIT_RETURN);
+ void LeaveExitFrame(bool save_doubles, Register arg_count,
+ bool restore_context, bool do_return = NO_EMIT_RETURN,
+ bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1103,7 +1102,7 @@ class MacroAssembler: public Assembler {
lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
- DCHECK_EQ(0, kStringTag);
+ DCHECK_EQ(0u, kStringTag);
return eq;
}
@@ -1286,16 +1285,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
void MovToFloatResult(DoubleRegister src);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand);
-
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
@@ -1527,6 +1516,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register dst, Register src) {
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index dbc12a0797..bf3835a842 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -95,12 +95,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
index c7d8f6dcfb..65ee173b0f 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -17,7 +17,8 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerMIPS();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index fabca67062..79f337d3df 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -13,6 +13,7 @@
#include "src/assembler.h"
#include "src/base/bits.h"
+#include "src/codegen.h"
#include "src/disasm.h"
#include "src/mips/constants-mips.h"
#include "src/mips/simulator-mips.h"
@@ -2244,7 +2245,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_fpu_register_double(fd_reg, -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg, sqrt(fs));
+ set_fpu_register_double(fd_reg, fast_sqrt(fs));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 76dd801af5..dede337e2a 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -118,10 +118,10 @@ int FPURegister::ToAllocationIndex(FPURegister reg) {
// RelocInfo.
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
- if (IsInternalReference(rmode_)) {
+ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
- int count = Assembler::RelocateInternalReference(p, delta);
+ int count = Assembler::RelocateInternalReference(rmode_, p, delta);
CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index b8f582131c..4ce970da33 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -175,7 +175,8 @@ Register ToRegister(int num) {
// Implementation of RelocInfo.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+ 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
bool RelocInfo::IsCodedSpecially() {
@@ -634,7 +635,19 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
-int64_t Assembler::target_at(int64_t pos) {
+int64_t Assembler::target_at(int64_t pos, bool is_internal) {
+ if (is_internal) {
+ int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
+ int64_t address = *p;
+ if (address == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ int64_t instr_address = reinterpret_cast<int64_t>(p);
+ int64_t delta = instr_address - address;
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
+ }
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
@@ -696,7 +709,13 @@ int64_t Assembler::target_at(int64_t pos) {
}
-void Assembler::target_at_put(int64_t pos, int64_t target_pos) {
+void Assembler::target_at_put(int64_t pos, int64_t target_pos,
+ bool is_internal) {
+ if (is_internal) {
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
+ return;
+ }
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
@@ -766,7 +785,8 @@ void Assembler::print(Label* L) {
} else {
PrintF("%d\n", instr);
}
- next(&l);
+ next(&l, internal_reference_positions_.find(l.pos()) !=
+ internal_reference_positions_.end());
}
} else {
PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -777,6 +797,7 @@ void Assembler::print(Label* L) {
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
int32_t trampoline_pos = kInvalidSlotPos;
+ bool is_internal = false;
if (L->is_linked() && !trampoline_emitted_) {
unbound_labels_count_--;
next_buffer_check_ += kTrampolineSlotsSize;
@@ -785,23 +806,28 @@ void Assembler::bind_to(Label* L, int pos) {
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
int32_t dist = pos - fixup_pos;
- next(L); // Call next before overwriting link with target at fixup_pos.
+ is_internal = internal_reference_positions_.find(fixup_pos) !=
+ internal_reference_positions_.end();
+ next(L, is_internal); // Call next before overwriting link with target at
+ // fixup_pos.
Instr instr = instr_at(fixup_pos);
- if (IsBranch(instr)) {
+ if (is_internal) {
+ target_at_put(fixup_pos, pos, is_internal);
+ } else if (IsBranch(instr)) {
if (dist > kMaxBranchOffset) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry(fixup_pos);
CHECK(trampoline_pos != kInvalidSlotPos);
}
DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos);
+ target_at_put(fixup_pos, trampoline_pos, false);
fixup_pos = trampoline_pos;
dist = pos - fixup_pos;
}
- target_at_put(fixup_pos, pos);
+ target_at_put(fixup_pos, pos, false);
} else {
DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
- target_at_put(fixup_pos, pos);
+ target_at_put(fixup_pos, pos, false);
}
}
L->bind_to(pos);
@@ -819,9 +845,9 @@ void Assembler::bind(Label* L) {
}
-void Assembler::next(Label* L) {
+void Assembler::next(Label* L, bool is_internal) {
DCHECK(L->is_linked());
- int link = target_at(L->pos());
+ int link = target_at(L->pos(), is_internal);
if (link == kEndOfChain) {
L->Unuse();
} else {
@@ -2559,30 +2585,18 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
+ intptr_t pc_delta) {
+ if (RelocInfo::IsInternalReference(rmode)) {
+ int64_t* p = reinterpret_cast<int64_t*>(pc);
+ if (*p == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ *p += pc_delta;
+ return 2; // Number of instructions patched.
}
-}
-
-
-int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
Instr instr = instr_at(pc);
+ DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
DCHECK(IsJ(instr) || IsLui(instr));
if (IsLui(instr)) {
Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
@@ -2671,12 +2685,12 @@ void Assembler::GrowBuffer() {
// Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
+ rmode == RelocInfo::INTERNAL_REFERENCE) {
byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
- RelocateInternalReference(p, pc_delta);
+ RelocateInternalReference(rmode, p, pc_delta);
}
}
-
DCHECK(!overflow());
}
@@ -2695,6 +2709,21 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dd(Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ if (label->is_bound()) {
+ uint64_t data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
+ } else {
+ uint64_t target_pos = jump_address(label);
+ emit(target_pos);
+ internal_reference_positions_.insert(label->pos());
+ }
+}
+
+
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) =
@@ -2775,7 +2804,7 @@ void Assembler::CheckTrampolinePool() {
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and available
// to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
// TODO(plind): Verify this, presume I cannot use macro-assembler
// here.
lui(at, (imm64 >> 32) & kImm16Mask);
@@ -2834,7 +2863,7 @@ Address Assembler::target_address_at(Address pc) {
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
- HeapNumber::cast(object)->set_value(base::OS::nan_value());
+ HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 5ca2f3ad03..5ad98f6cd8 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -37,6 +37,9 @@
#define V8_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
+
+#include <set>
+
#include "src/assembler.h"
#include "src/mips64/constants-mips64.h"
#include "src/serialize.h"
@@ -323,6 +326,8 @@ const FPURegister f31 = { 31 };
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
+// Used on mips64r6 for compare operations.
+#define kDoubleCompareReg f31
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -1053,12 +1058,18 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg);
- static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
+
+ static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
+ intptr_t pc_delta);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dd(Label* label);
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
@@ -1157,10 +1168,10 @@ class Assembler : public AssemblerBase {
int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
- int64_t target_at(int64_t pos);
+ int64_t target_at(int64_t pos, bool is_internal);
// Patch branch instruction at pos to branch to given branch target pos.
- void target_at_put(int64_t pos, int64_t target_pos);
+ void target_at_put(int64_t pos, int64_t target_pos, bool is_internal);
// Say if we need to relocate with this mode.
bool MustUseReg(RelocInfo::Mode rmode);
@@ -1330,7 +1341,7 @@ class Assembler : public AssemblerBase {
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
- void next(Label* L);
+ void next(Label* L, bool is_internal);
// One trampoline consists of:
// - space for trampoline slots,
@@ -1395,6 +1406,10 @@ class Assembler : public AssemblerBase {
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
static const int kInvalidSlotPos = -1;
+ // Internal reference positions, required for unbounded internal reference
+ // labels.
+ std::set<int64_t> internal_reference_positions_;
+
Trampoline trampoline_;
bool internal_trampoline_exception_;
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index c95ff30785..89fda10b05 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -137,6 +137,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
+ __ mov(a3, a1);
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
@@ -314,6 +315,36 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ld(a2, MemOperand(sp, 2 * kPointerSize));
+ __ push(a2);
+ }
+
+ __ push(a1); // argument for Runtime_NewObject
+ __ push(original_constructor); // original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(t0, v0);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ if (create_memento) {
+ __ jmp(count_incremented);
+ } else {
+ __ jmp(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -321,6 +352,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a2 : allocation site or undefined
+ // -- a3 : original constructor
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -342,7 +374,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
- __ AssertUndefinedOrAllocationSite(a2, a3);
+ __ AssertUndefinedOrAllocationSite(a2, t0);
__ push(a2);
}
@@ -351,7 +383,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ dsll32(a0, a0, 0);
__ MultiPushReversed(a0.bit() | a1.bit());
- Label rt_call, allocated;
+ Label rt_call, allocated, normal_new, count_incremented;
+ __ Branch(&normal_new, eq, a1, Operand(a3));
+
+ // Original constructor and function are different.
+ Generate_Runtime_NewObject(masm, create_memento, a3, &count_incremented,
+ &allocated);
+ __ bind(&normal_new);
+
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
if (FLAG_inline_new) {
@@ -597,27 +636,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// a1: constructor function
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ ld(a2, MemOperand(sp, 2 * kPointerSize));
- __ push(a2);
- }
+ Generate_Runtime_NewObject(masm, create_memento, a1, &count_incremented,
+ &allocated);
- __ push(a1); // Argument for Runtime_NewObject.
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ mov(t0, v0);
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
// Receiver for constructor call allocated.
// t0: JSObject
@@ -747,6 +768,95 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a2 : allocation site or undefined
+ // -- a3 : original constructor
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ __ mov(a4, a0);
+ __ SmiTag(a4);
+ __ push(a4); // Smi-tagged arguments count.
+
+ // Push new.target.
+ __ push(a3);
+
+ // receiver is the hole.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ push(at);
+
+ // Set up pointer to last argument.
+ __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a4: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ SmiUntag(a4);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ dsll(at, a4, kPointerSizeLog2);
+ __ Daddu(at, a2, Operand(at));
+ __ ld(at, MemOperand(at));
+ __ push(at);
+ __ bind(&entry);
+ __ Daddu(a4, a4, Operand(-1));
+ __ Branch(&loop, ge, a4, Operand(zero_reg));
+
+ __ Daddu(a0, a0, Operand(1));
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ li(a2, Operand(debug_step_in_fp));
+ __ ld(a2, MemOperand(a2));
+ __ Branch(&skip_step_in, eq, a2, Operand(zero_reg));
+
+ __ Push(a0, a1, a1);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ Pop(a0, a1);
+
+ __ bind(&skip_step_in);
+
+
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ // v0: result
+ // sp[0]: number of arguments (smi-tagged)
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ld(a1, MemOperand(sp, 0));
+
+ // Leave construct frame.
+ }
+
+ __ SmiScale(at, a1, kPointerSizeLog2);
+ __ Daddu(sp, sp, Operand(at));
+ __ Daddu(sp, sp, Operand(kPointerSize));
+ __ Jump(ra);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 6bbd1a3183..1a0b97221e 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -583,7 +583,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ And(a6, lhs, Operand(rhs));
__ JumpIfNotSmi(a6, &not_smis, a4);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -997,6 +997,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
@@ -1347,7 +1348,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = a4;
+ Register scratch = a5;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
@@ -1465,7 +1466,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Branch(&loop);
__ bind(&is_instance);
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
if (!HasCallSiteInlineCheck()) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
@@ -1480,7 +1481,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ PatchRelocatedValue(inline_site, scratch, v0);
if (!ReturnTrueFalseObject()) {
- DCHECK_EQ(Smi::FromInt(0), 0);
__ mov(v0, zero_reg);
}
}
@@ -1585,6 +1585,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1643,6 +1644,9 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
+
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1674,6 +1678,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// a6 : allocated object (tagged)
// t1 : mapped parameter count (tagged)
+ CHECK(!has_new_target());
+
__ ld(a1, MemOperand(sp, 0 * kPointerSize));
// a1 = parameter count (tagged)
@@ -1715,7 +1721,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
__ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
__ SmiScale(t1, a1, kPointerSizeLog2);
@@ -1930,6 +1936,12 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ if (has_new_target()) {
+ // Subtract 1 from smi-tagged arguments count.
+ __ SmiUntag(a1);
+ __ Daddu(a1, a1, Operand(-1));
+ __ SmiTag(a1);
+ }
__ sd(a1, MemOperand(sp, 0));
__ SmiScale(at, a1, kPointerSizeLog2);
@@ -2013,6 +2025,34 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // sp[0] : index of rest parameter
+ // sp[4] : number of parameters
+ // sp[8] : receiver displacement
+ // Check if the calling frame is an arguments adaptor frame.
+
+ Label runtime;
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ SmiScale(at, a1, kPointerSizeLog2);
+
+ __ Daddu(a3, a2, Operand(at));
+
+ __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sd(a3, MemOperand(sp, 2 * kPointerSize));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2739,6 +2779,17 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, a5);
}
+ // Pass function as original constructor.
+ if (IsSuperConstructorCall()) {
+ __ li(a4, Operand(1 * kPointerSize));
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(a4, a4, at);
+ __ daddu(at, sp, a4);
+ __ ld(a3, MemOperand(at, 0));
+ } else {
+ __ mov(a3, a1);
+ }
+
// Jump to the function-specific construct stub.
Register jmp_reg = a4;
__ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -2818,10 +2869,9 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id
+ // a2 - vector
Label miss;
- EmitLoadTypeFeedbackVector(masm, a2);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(&miss, ne, a1, Operand(at));
@@ -2836,6 +2886,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ Branch(&miss, ne, a5, Operand(at));
__ mov(a2, a4);
+ __ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -2856,6 +2907,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
+ // a2 - vector
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
@@ -2866,13 +2918,31 @@ void CallICStub::Generate(MacroAssembler* masm) {
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, a2);
-
// The checks. First, does r1 match the recorded monomorphic target?
__ dsrl(a4, a3, 32 - kPointerSizeLog2);
__ Daddu(a4, a2, Operand(a4));
__ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
- __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ ld(a5, FieldMemOperand(a4, WeakCell::kValueOffset));
+ __ Branch(&extra_checks_or_miss, ne, a1, Operand(a5));
+
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(a1, &extra_checks_or_miss);
__ bind(&have_js_function);
if (CallAsMethod()) {
@@ -2949,16 +3019,18 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
__ sd(a4, FieldMemOperand(a2, with_types_offset));
- // Store the function.
- __ dsrl(a4, a3, 32 - kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
- __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sd(a1, MemOperand(a4, 0));
+ // Store the function. Use a stub since we need a frame for allocation.
+ // a2 - vector
+ // a3 - slot
+ // a1 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(a1);
+ __ CallStub(&create_stub);
+ __ Pop(a1);
+ }
- // Update the write barrier.
- __ mov(a5, a1);
- __ RecordWrite(a2, a4, a5, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Branch(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
@@ -2980,26 +3052,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ ld(a4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
- __ Push(a4, a1, a2, a3);
+ // Push the receiver and the function and feedback info.
+ __ Push(a1, a2, a3);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to a1 and exit the internal frame.
- __ mov(a1, v0);
- }
+ // Move result to a1 and exit the internal frame.
+ __ mov(a1, v0);
}
@@ -3895,12 +3961,14 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
+ __ GetWeakValue(a4, cell);
__ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
__ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Branch(&miss, ne, a2, Operand(known_map_));
- __ Branch(&miss, ne, a3, Operand(known_map_));
+ __ Branch(&miss, ne, a2, Operand(a4));
+ __ Branch(&miss, ne, a3, Operand(a4));
__ Ret(USE_DELAY_SLOT);
__ dsubu(v0, a0, a1);
@@ -3990,7 +4058,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ dsll(at, index, 1);
__ Daddu(index, index, at); // index *= 3.
@@ -4516,6 +4584,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallICStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, a2);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4761,8 +4843,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- a0 : argc (only if argument_count() == ANY)
// -- a1 : constructor
// -- a2 : AllocationSite or undefined
- // -- sp[0] : return address
- // -- sp[4] : last argument
+ // -- a3 : original constructor
+ // -- sp[0] : last argument
// -----------------------------------
if (FLAG_debug_code) {
@@ -4783,6 +4865,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, a4);
}
+ Label subclassing;
+ __ Branch(&subclassing, ne, a1, Operand(a3));
+
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -4796,6 +4881,29 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ // Subclassing.
+ __ bind(&subclassing);
+ __ Push(a1);
+ __ Push(a3);
+
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ li(at, Operand(2));
+ __ addu(a0, a0, at);
+ break;
+ case NONE:
+ __ li(a0, Operand(2));
+ break;
+ case ONE:
+ __ li(a0, Operand(3));
+ break;
+ }
+
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
@@ -4873,12 +4981,154 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(
+ MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
+ MemOperand return_value_operand, MemOperand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address.is(a1) || function_address.is(a2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ lb(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, Operand(thunk_ref));
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ __ mov(t9, function_address);
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ __ li(s3, Operand(next_address));
+ __ ld(s0, MemOperand(s3, kNextOffset));
+ __ ld(s1, MemOperand(s3, kLimitOffset));
+ __ ld(s2, MemOperand(s3, kLevelOffset));
+ __ Daddu(s2, s2, Operand(1));
+ __ sd(s2, MemOperand(s3, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, t9);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ ld(v0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ sd(s0, MemOperand(s3, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ ld(a1, MemOperand(s3, kLevelOffset));
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ }
+ __ Dsubu(s2, s2, Operand(1));
+ __ sd(s2, MemOperand(s3, kLevelOffset));
+ __ ld(at, MemOperand(s3, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+ // Check if the function scheduled an exception.
+ __ bind(&leave_exit_frame);
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ ld(a5, MemOperand(at));
+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+ __ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ ld(cp, *context_restore_operand);
+ }
+ if (stack_space_offset != kInvalidStackOffset) {
+ DCHECK(kCArgsSlotsSize == 0);
+ __ ld(s0, MemOperand(sp, stack_space_offset));
+ } else {
+ __ li(s0, Operand(stack_space));
+ }
+ __ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN,
+ stack_space_offset != kInvalidStackOffset);
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ sd(s1, MemOperand(s3, kLimitOffset));
+ __ mov(s0, v0);
+ __ mov(a0, v0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ mov(v0, s0);
+ __ jmp(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- a4 : call_data
// -- a2 : holder
// -- a1 : api_function_address
+ // -- a3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -4893,10 +5143,6 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = a1;
Register context = cp;
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
-
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
@@ -4908,6 +5154,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || a3.is(argc.reg()));
+
// Save context, callee and call data.
__ Push(context, callee, call_data);
// Load context from callee.
@@ -4919,8 +5167,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
}
// Push return value and default return value.
__ Push(scratch, scratch);
- __ li(scratch,
- Operand(ExternalReference::isolate_address(isolate())));
+ __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
// Push isolate and holder.
__ Push(scratch, holder);
@@ -4940,36 +5187,69 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ Daddu(a0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ sd(scratch, MemOperand(a0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ sd(at, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(at, Operand(argc));
- __ sd(at, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ if (argc.is_immediate()) {
+ // FunctionCallbackInfo::values_
+ __ Daddu(at, scratch,
+ Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
+ __ sd(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc.immediate()));
+ __ sd(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
+ } else {
+ // FunctionCallbackInfo::values_
+ __ dsll(at, argc.reg(), kPointerSizeLog2);
+ __ Daddu(at, at, scratch);
+ __ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ __ sd(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ sd(argc.reg(), MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_
+ __ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
+ __ dsll(at, argc.reg(), kPointerSizeLog2);
+ __ sd(at, MemOperand(a0, 3 * kPointerSize));
+ }
+
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument.
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ int32_t stack_space_offset = 4 * kPointerSize;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_offset = kInvalidStackOffset;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_offset, return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
+ call_data_undefined);
+}
+
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- return_value_operand,
- &context_restore_operand);
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -5000,11 +5280,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kInvalidStackOffset,
+ MemOperand(fp, 6 * kPointerSize), NULL);
}
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index b29250653d..05a193c517 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -643,7 +643,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Daddu(scratch1, array, Operand(kHeapObjectTag));
- __ sd(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
+ __ sd(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
scratch1,
@@ -665,13 +665,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register hole_lower = elements;
Register hole_upper = length;
__ li(hole_lower, Operand(kHoleNanLower32));
+ __ li(hole_upper, Operand(kHoleNanUpper32));
+
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch3: begin of FixedDoubleArray element fields, not tagged
- __ Branch(USE_DELAY_SLOT, &entry);
- __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot.
+
+ __ Branch(&entry);
__ bind(&only_change_map);
__ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index d12148a985..cc599555b4 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -942,6 +942,7 @@ const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4;
// TODO(plind): find all usages and remove the needless instructions for n64.
const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
+const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
} } // namespace v8::internal
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index d7a7f05fce..e77faedd3b 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -161,6 +161,9 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
+ __ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ sd(fp, MemOperand(a2));
+
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
diff --git a/deps/v8/src/mips64/full-codegen-mips64.cc b/deps/v8/src/mips64/full-codegen-mips64.cc
index 9d4ed09539..c400a8ba33 100644
--- a/deps/v8/src/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/mips64/full-codegen-mips64.cc
@@ -134,7 +134,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ld(at, MemOperand(sp, receiver_offset));
@@ -158,7 +158,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -203,7 +203,7 @@ void FullCodeGenerator::Generate() {
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(a1);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -245,6 +245,27 @@ void FullCodeGenerator::Generate() {
}
}
}
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Daddu(a3, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(a2, Operand(Smi::FromInt(num_parameters)));
+ __ li(a1, Operand(Smi::FromInt(rest_index)));
+ __ Push(a3, a2, a1);
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, v0, a1, a2);
+ }
+
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -267,15 +288,19 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, v0, a1, a2);
@@ -435,7 +460,11 @@ void FullCodeGenerator::EmitReturnSequence() {
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int32_t sp_delta = arg_count * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -918,14 +947,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(a1, scope_->ContextChainLength(scope_->ScriptScope()));
- __ ld(a1, ContextOperand(a1, variable->interface()->Index()));
+ __ ld(a1, ContextOperand(a1, descriptor->Index()));
__ ld(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
// Assign it.
@@ -1224,7 +1254,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// For proxies, no filtering is done.
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK_EQ(Smi::FromInt(0), 0);
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ Branch(&update_each, eq, a2, Operand(zero_reg));
// Convert the entry to a string or (smi) 0 if it isn't a property
@@ -1242,6 +1272,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1281,7 +1312,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1517,6 +1548,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1668,11 +1704,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ push(v0); // Save result on stack.
@@ -1728,17 +1766,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ld(a0, MemOperand(sp));
__ push(a0);
VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- __ Drop(2);
- }
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1760,6 +1799,69 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(v0); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ ld(a0, MemOperand(sp)); // Duplicate receiver.
+ __ push(a0);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ ld(a0, MemOperand(sp));
@@ -1815,6 +1917,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1954,19 +2057,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(v0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
- mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -2349,7 +2448,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left_expr,
Expression* right_expr) {
Label done, smi_case, stub_call;
@@ -2370,7 +2468,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2453,9 +2551,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ ld(scratch, MemOperand(sp, kPointerSize)); // constructor
@@ -2463,24 +2559,29 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ ld(scratch, MemOperand(sp, 0)); // prototype
}
__ push(scratch);
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
default:
@@ -2496,12 +2597,10 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
__ pop(a1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2576,7 +2675,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2642,8 +2741,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ li(a4, Operand(var->name()));
- __ li(a3, Operand(Smi::FromInt(strict_mode())));
- // jssp[0] : mode.
+ __ li(a3, Operand(Smi::FromInt(language_mode())));
+ // jssp[0] : language mode.
// jssp[8] : name.
// jssp[16] : context.
// jssp[24] : value.
@@ -2662,7 +2761,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
@@ -2697,8 +2796,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(v0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2710,9 +2809,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(v0);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2730,7 +2830,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2753,8 +2854,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(v0);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2769,8 +2868,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(v0);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(v0);
}
@@ -2920,9 +3020,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2949,8 +3048,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ld(a5, MemOperand(fp, receiver_offset * kPointerSize));
- // a4: the strict mode.
- __ li(a4, Operand(Smi::FromInt(strict_mode())));
+ // a4: the language mode.
+ __ li(a4, Operand(Smi::FromInt(language_mode())));
// a1: the start position of the scope the calls resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
@@ -2962,8 +3061,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(a0);
__ CallRuntime(Runtime::kGetPrototype, 1);
@@ -3084,11 +3182,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ Push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3117,12 +3211,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ Push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3130,6 +3220,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
+
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetSourcePosition(expr->position());
@@ -3155,6 +3246,66 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ if (!ValidateSuperCall(expr)) return;
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into a1 and a0.
+ __ li(a0, Operand(arg_count));
+ __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ RecordJSReturnSite(expr);
+
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(a1, this_var);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ Branch(&uninitialized_this, eq, a1, Operand(at));
+ __ li(a0, Operand(this_var->name()));
+ __ Push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3688,7 +3839,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -4036,6 +4187,64 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ Push(result_register());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ mov(a0, zero_reg);
+ __ Branch(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(a1, a1);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ Daddu(a1, a1, Operand(-1));
+ __ mov(a0, a1);
+
+ // Get arguments pointer in a2.
+ __ dsll(at, a1, kPointerSizeLog2);
+ __ Daddu(a2, a2, Operand(at));
+ __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset));
+ Label loop;
+ __ bind(&loop);
+ // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Daddu(a2, a2, Operand(-kPointerSize));
+ __ ld(a3, MemOperand(a2));
+ __ Push(a3);
+ __ Daddu(a1, a1, Operand(-1));
+ __ Branch(&loop, ne, a1, Operand(zero_reg));
+ }
+
+ __ bind(&args_set_up);
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ Daddu(at, at, Operand(sp));
+ __ ld(a1, MemOperand(at, 0));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(result_register());
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
@@ -4055,7 +4264,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4456,7 +4665,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ li(a1, Operand(Smi::FromInt(strict_mode())));
+ __ li(a1, Operand(Smi::FromInt(language_mode())));
__ push(a1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4464,7 +4673,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ ld(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
@@ -4681,6 +4890,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4715,8 +4925,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4786,7 +4995,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 44c8dff17d..8d1b9f29e0 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -98,7 +98,19 @@ void FastCloneShallowObjectDescriptor::Initialize(
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, a2, a3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a2, a3, a1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -119,6 +131,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a3, a2};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, a1};
data->Initialize(arraysize(registers), registers, NULL);
@@ -299,6 +321,27 @@ void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
a4, // call_data
a2, // holder
a1, // api_function_address
+ a3, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ a0, // callee
+ a4, // call_data
+ a2, // holder
+ a1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.cc b/deps/v8/src/mips64/lithium-codegen-mips64.cc
index a817a285a4..ae2e792f42 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.cc
@@ -60,7 +60,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
@@ -118,8 +117,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -311,7 +309,7 @@ bool LCodeGen::GenerateJumpTable() {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
@@ -765,9 +763,9 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
- const char* detail, Register src1,
- const Operand& src2) {
+ Register src1, const Operand& src2) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
@@ -808,21 +806,22 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ bind(&skip);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -832,12 +831,12 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, Register src1,
- const Operand& src2) {
+ Deoptimizer::DeoptReason deopt_reason,
+ Register src1, const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
+ DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}
@@ -859,6 +858,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1067,7 +1067,8 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ dsubu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ dsubu(dividend, zero_reg, dividend);
@@ -1086,7 +1087,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1099,7 +1100,8 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
@@ -1118,7 +1120,8 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
+ Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
@@ -1127,7 +1130,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@@ -1140,7 +1143,8 @@ void LCodeGen::DoModI(LModI* instr) {
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
+ Operand(zero_reg));
}
__ bind(&done);
}
@@ -1156,18 +1160,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1197,14 +1202,15 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1213,7 +1219,8 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Dmul(scratch0(), result, Operand(divisor));
__ Dsubu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
+ Operand(zero_reg));
}
}
@@ -1231,14 +1238,16 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1247,7 +1256,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1259,7 +1268,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
} else {
__ dmod(remainder, dividend, divisor);
}
- DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
+ Operand(zero_reg));
}
}
@@ -1304,14 +1314,14 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Dsubu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
}
__ Xor(scratch, scratch, result);
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(gt, instr, "overflow", result, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
}
return;
}
@@ -1339,14 +1349,15 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1390,14 +1401,16 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1406,7 +1419,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1443,14 +1456,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(gt, instr, "overflow", scratch, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, scratch,
+ Operand(kMaxInt));
} else {
__ Dsubu(result, zero_reg, left);
}
@@ -1459,7 +1473,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+ Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
@@ -1514,7 +1529,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
- DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1529,7 +1544,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+ Operand(zero_reg));
__ bind(&done);
}
}
@@ -1593,8 +1609,10 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
// TODO(yy): (-1) >>> 0. anything else?
- DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
- DeoptimizeIf(gt, instr, "negative value", result, Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
+ Operand(zero_reg));
+ DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
+ Operand(kMaxInt));
}
break;
case Token::SHL:
@@ -1629,7 +1647,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
+ Operand(zero_reg));
}
__ Move(result, left);
}
@@ -1685,10 +1704,13 @@ void LCodeGen::DoSubI(LSubI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+ Operand(zero_reg));
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
- DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
+ DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result),
+ Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result),
+ Operand(kMinInt));
}
}
}
@@ -1743,9 +1765,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch,
+ Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1880,11 +1903,14 @@ void LCodeGen::DoAddI(LAddI* instr) {
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+ Operand(zero_reg));
// if not smi, it must int32.
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
- DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
+ DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result),
+ Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result),
+ Operand(kMinInt));
}
}
}
@@ -1998,8 +2024,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
@@ -2146,7 +2171,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
@@ -2202,7 +2227,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, "unexpected object", zero_reg,
+ DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
Operand(zero_reg));
}
}
@@ -2337,6 +2362,8 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
Register scratch = scratch0();
__ FmoveHigh(scratch, input_reg);
+ __ dsll32(scratch, scratch, 0); // FmoveHigh (mfhc1) sign-extends.
+ __ dsrl32(scratch, scratch, 0); // Use only low 32-bits.
EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}
@@ -2850,7 +2877,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ ld(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", result, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
}
}
@@ -2905,7 +2932,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at));
}
// Store the value.
@@ -2923,7 +2950,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole", result, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@@ -2947,7 +2974,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@@ -3042,7 +3069,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", result, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@@ -3188,8 +3215,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
- Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
+ result, Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
@@ -3248,8 +3275,9 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ ldc1(result, MemOperand(scratch));
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
+ __ lwu(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
+ Operand(kHoleNanUpper32));
}
}
@@ -3303,10 +3331,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (hinstr->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
+ Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
}
}
}
@@ -3462,10 +3491,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
+ DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
@@ -3501,7 +3530,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr, "too many arguments", length,
+ DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
@@ -3582,24 +3611,19 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- A1State a1_state) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
+ Register function_reg = a1;
LPointerMap* pointers = instr->pointer_map();
if (can_invoke_directly) {
- if (a1_state == A1_UNINITIALIZED) {
- __ li(a1, function);
- }
-
// Change context.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
@@ -3608,7 +3632,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
__ Call(at);
// Set up deoptimization.
@@ -3617,7 +3641,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
@@ -3632,7 +3656,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
Label done;
Register exponent = scratch0();
@@ -3699,7 +3723,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ dsubu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
__ bind(&done);
}
@@ -3754,7 +3778,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3763,7 +3787,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ mfhc1(scratch1, input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
__ bind(&done);
}
}
@@ -3796,7 +3821,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr, "overflow", scratch,
+ DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@@ -3814,7 +3839,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@@ -3833,7 +3858,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3842,7 +3867,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&check_sign_on_zero);
__ mfhc1(scratch, input); // Get exponent/sign bits.
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
+ Operand(zero_reg));
}
__ bind(&done);
}
@@ -3908,7 +3934,7 @@ void LCodeGen::DoPower(LPower* instr) {
DCHECK(!a7.is(tagged_exponent));
__ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number", a7, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3967,9 +3993,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- A1_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
@@ -4078,8 +4102,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(a3));
+ DCHECK(vector_register.is(a2));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ li(vector_register, vector);
+ __ li(slot_register, Operand(Smi::FromInt(index)));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4174,6 +4220,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Register scratch2 = scratch1();
Register scratch1 = scratch0();
+
HObjectAccess access = instr->hydrogen()->access();
int offset = access.offset();
if (access.IsExternalMemory()) {
@@ -4188,7 +4235,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(!representation.IsSmi() ||
!instr->value()->IsConstantOperand() ||
IsSmi(LConstantOperand::cast(instr->value())));
- if (representation.IsDouble()) {
+ if (!FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DCHECK(!instr->hydrogen()->has_transition());
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
@@ -4219,7 +4266,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
destination = scratch1;
__ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
}
- Register value = ToRegister(instr->value());
+
if (representation.IsSmi() && SmiValuesAre32Bits() &&
instr->hydrogen()->value()->representation().IsInteger32()) {
DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
@@ -4227,16 +4274,25 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ Load(scratch2, FieldMemOperand(destination, offset), representation);
__ AssertSmi(scratch2);
}
-
// Store int value directly to upper half of the smi.
offset += kPointerSize / 2;
representation = Representation::Integer32();
}
-
MemOperand operand = FieldMemOperand(destination, offset);
- __ Store(value, operand, representation);
+
+ if (FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ sdc1(value, operand);
+ } else {
+ DCHECK(instr->value()->IsRegister());
+ Register value = ToRegister(instr->value());
+ __ Store(value, operand, representation);
+ }
+
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
+ Register value = ToRegister(instr->value());
__ RecordWriteField(destination,
offset,
value,
@@ -4256,7 +4312,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4279,7 +4335,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
}
}
@@ -4521,7 +4577,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4570,7 +4626,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
ne, &no_memento_found);
- DeoptimizeIf(al, instr, "memento found");
+ DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4873,12 +4929,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0x80000000));
- DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@@ -4894,7 +4950,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
@@ -4919,7 +4975,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
+ Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -4927,7 +4984,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
__ mfhc1(scratch, result_reg); // Get exponent/sign bits.
- DeoptimizeIf(eq, instr, "minus zero", scratch,
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
@@ -4935,7 +4992,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -5000,12 +5057,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
- Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
+ scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
+ Operand(at));
// Load the double value.
__ ldc1(double_scratch,
@@ -5020,7 +5078,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -5028,7 +5086,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
}
}
__ bind(&done);
@@ -5104,7 +5163,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -5112,7 +5171,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
__ bind(&done);
}
}
@@ -5138,7 +5198,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -5146,7 +5206,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ Operand(zero_reg));
__ bind(&done);
}
}
@@ -5157,7 +5218,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
}
@@ -5165,7 +5226,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}
}
@@ -5183,12 +5244,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(first));
} else {
- DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
+ DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
+ DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(last));
}
}
} else {
@@ -5199,11 +5263,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
- Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
+ at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ Operand(tag));
}
}
}
@@ -5218,9 +5283,9 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ ld(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
}
}
@@ -5236,7 +5301,8 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
+ Operand(zero_reg));
}
@@ -5290,7 +5356,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
}
__ bind(&success);
@@ -5328,7 +5394,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@@ -5545,7 +5611,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -5753,7 +5819,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
type = Deoptimizer::LAZY;
}
- DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
Operand(zero_reg));
}
@@ -5844,18 +5910,18 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
+ DeoptimizeIf(eq, instr, Deoptimizer::kUndefined, object, Operand(at));
Register null_value = a5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
+ DeoptimizeIf(eq, instr, Deoptimizer::kNull, object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr, "not a JavaScript object", a1,
+ DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
@@ -5873,7 +5939,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
__ bind(&use_cache);
}
@@ -5893,7 +5959,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ld(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
__ bind(&done);
}
@@ -5903,7 +5969,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.h b/deps/v8/src/mips64/lithium-codegen-mips64.h
index b320dcb817..38890b371e 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.h
@@ -144,7 +144,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
@@ -211,18 +211,11 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
- enum A1State {
- A1_UNINITIALIZED,
- A1_CONTAINS_TARGET
- };
-
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- A1State a1_state);
+ int formal_parameter_count, int arity,
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -230,12 +223,14 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::BailoutType bailout_type, const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(
+ Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
+ Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
diff --git a/deps/v8/src/mips64/lithium-mips64.cc b/deps/v8/src/mips64/lithium-mips64.cc
index a12764b202..1e48881a06 100644
--- a/deps/v8/src/mips64/lithium-mips64.cc
+++ b/deps/v8/src/mips64/lithium-mips64.cc
@@ -262,6 +262,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -1251,7 +1265,15 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(a3);
+ vector = FixedTemp(a2);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, v0), instr);
}
@@ -1501,9 +1523,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ int32_t constant_value = 0;
if (right->IsConstant()) {
HConstant* constant = HConstant::cast(right);
- int32_t constant_value = constant->Integer32Value();
+ constant_value = constant->Integer32Value();
// Constants -1, 0 and 1 can be optimized if the result can overflow.
// For other constants, it can be optimized only without overflow.
if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
@@ -1526,7 +1549,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
right_op = UseRegister(right);
}
LMulI* mul = new(zone()) LMulI(left_op, right_op);
- if (can_overflow || bailout_on_minus_zero) {
+ if (right_op->IsConstantOperand()
+ ? ((can_overflow && constant_value == -1) ||
+ (bailout_on_minus_zero && constant_value <= 0))
+ : (can_overflow || bailout_on_minus_zero)) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
diff --git a/deps/v8/src/mips64/lithium-mips64.h b/deps/v8/src/mips64/lithium-mips64.h
index b89a9c4dd3..afc84efb8f 100644
--- a/deps/v8/src/mips64/lithium-mips64.h
+++ b/deps/v8/src/mips64/lithium-mips64.h
@@ -1901,20 +1901,26 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
int arity() const { return hydrogen()->argument_count() - 1; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2171,7 +2177,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2228,7 +2234,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 2de1c2a1a9..4a2261b868 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -601,7 +601,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
Branch(miss, ne, at, Operand(zero_reg));
@@ -3058,7 +3058,7 @@ void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
j(imm28);
}
// Emit a nop in the branch delay slot if required.
@@ -3075,7 +3075,7 @@ void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
li(at, Operand(imm64), ADDRESS_LOAD);
}
jr(at);
@@ -3094,7 +3094,7 @@ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
li(at, Operand(imm64), ADDRESS_LOAD);
}
jalr(at);
@@ -3234,7 +3234,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- DCHECK_EQ(Smi::FromInt(0), 0);
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
// The second zero_reg indicates no context.
// The first zero_reg is the NULL frame pointer.
// The operands are reversed to match the order of MultiPush/Pop.
@@ -3884,7 +3884,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
- li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
@@ -4359,137 +4359,6 @@ void MacroAssembler::TailCallStub(CodeStub* stub,
}
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- int64_t offset = (ref0.address() - ref1.address());
- DCHECK(static_cast<int>(offset) == offset);
- return static_cast<int>(offset);
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- DCHECK(function_address.is(a1) || function_address.is(a2));
-
- Label profiler_disabled;
- Label end_profiler_check;
- li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
- lb(t9, MemOperand(t9, 0));
- Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- li(t9, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- mov(t9, function_address);
- bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- li(s3, Operand(next_address));
- ld(s0, MemOperand(s3, kNextOffset));
- ld(s1, MemOperand(s3, kLimitOffset));
- ld(s2, MemOperand(s3, kLevelOffset));
- Daddu(s2, s2, Operand(1));
- sd(s2, MemOperand(s3, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, a0);
- li(a0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(this, t9);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, a0);
- li(a0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load value from ReturnValue.
- ld(v0, return_value_operand);
- bind(&return_value_loaded);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- sd(s0, MemOperand(s3, kNextOffset));
- if (emit_debug_code()) {
- ld(a1, MemOperand(s3, kLevelOffset));
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
- }
- Dsubu(s2, s2, Operand(1));
- sd(s2, MemOperand(s3, kLevelOffset));
- ld(at, MemOperand(s3, kLimitOffset));
- Branch(&delete_allocated_handles, ne, s1, Operand(at));
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(a4, Heap::kTheHoleValueRootIndex);
- li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
- ld(a5, MemOperand(at));
- Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
- bind(&exception_handled);
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- ld(cp, *context_restore_operand);
- }
- li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- sd(s1, MemOperand(s3, kLimitOffset));
- mov(s0, v0);
- mov(a0, v0);
- PrepareCallCFunction(1, s1);
- li(a0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
- 1);
- mov(v0, s0);
- jmp(&leave_exit_frame);
-}
-
-
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -5116,10 +4985,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count,
- bool restore_context,
- bool do_return) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context, bool do_return,
+ bool argument_count_is_length) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -5152,8 +5020,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
if (argument_count.is_valid()) {
- dsll(t8, argument_count, kPointerSizeLog2);
- daddu(sp, sp, t8);
+ if (argument_count_is_length) {
+ daddu(sp, sp, argument_count);
+ } else {
+ dsll(t8, argument_count, kPointerSizeLog2);
+ daddu(sp, sp, t8);
+ }
}
if (do_return) {
@@ -5350,7 +5222,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
#if defined(__APPLE__)
DCHECK_EQ(1, kSmiTagMask);
#else
- DCHECK_EQ((uint64_t)1, kSmiTagMask);
+ DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
or_(at, reg1, reg2);
JumpIfNotSmi(at, on_not_both_smi);
@@ -5365,7 +5237,7 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
#if defined(__APPLE__)
DCHECK_EQ(1, kSmiTagMask);
#else
- DCHECK_EQ((uint64_t)1, kSmiTagMask);
+ DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
// Both Smi tags must be 1 (not Smi).
and_(at, reg1, reg2);
@@ -6057,6 +5929,19 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ ld(dst,
+ FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+ : AccessorPair::kSetterOffset;
+ ld(dst, FieldMemOperand(dst, offset));
+}
+
+
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Register empty_fixed_array_value = a6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index a9e877253a..1e25b334c7 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -901,10 +901,9 @@ class MacroAssembler: public Assembler {
int stack_space = 0);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles,
- Register arg_count,
- bool restore_context,
- bool do_return = NO_EMIT_RETURN);
+ void LeaveExitFrame(bool save_doubles, Register arg_count,
+ bool restore_context, bool do_return = NO_EMIT_RETURN,
+ bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1133,7 +1132,7 @@ class MacroAssembler: public Assembler {
ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
- DCHECK_EQ(0, kStringTag);
+ DCHECK_EQ(0u, kStringTag);
return eq;
}
@@ -1316,16 +1315,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
void MovToFloatResult(DoubleRegister src);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand);
-
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
@@ -1602,6 +1591,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register dst, Register src) {
diff --git a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
index b20cab9825..c1c0e4c2ea 100644
--- a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
@@ -131,12 +131,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/deps/v8/src/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/mips64/regexp-macro-assembler-mips64.h
index dd4e8a93bc..ae4d25b79a 100644
--- a/deps/v8/src/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/regexp-macro-assembler-mips64.h
@@ -17,7 +17,8 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerMIPS();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 9899d47a0e..bb39b97cca 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -13,6 +13,7 @@
#include "src/assembler.h"
#include "src/base/bits.h"
+#include "src/codegen.h"
#include "src/disasm.h"
#include "src/mips64/constants-mips64.h"
#include "src/mips64/simulator-mips64.h"
@@ -808,7 +809,7 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
FlushOnePage(i_cache, start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
- DCHECK_EQ((uint64_t)0, start & CachePage::kPageMask);
+ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
@@ -2391,7 +2392,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_fpu_register_double(fd_reg, -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg, sqrt(fs));
+ set_fpu_register_double(fd_reg, fast_sqrt(fs));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index c325e58c86..1848e4f7c3 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -180,9 +180,9 @@ PropertyKind.Indexed = 2;
// A copy of the PropertyType enum from property-details.h
var PropertyType = {};
-PropertyType.Field = 0;
-PropertyType.Constant = 1;
-PropertyType.Callbacks = 3;
+PropertyType.Data = 0;
+PropertyType.DataConstant = 2;
+PropertyType.AccessorConstant = 3;
// Different attributes for a property.
@@ -848,7 +848,7 @@ ObjectMirror.prototype.lookupProperty = function(value) {
// Skip properties which are defined through assessors.
var property = properties[i];
- if (property.propertyType() != PropertyType.Callbacks) {
+ if (property.propertyType() != PropertyType.AccessorConstant) {
if (%_ObjectEquals(property.value_, value.value_)) {
return property;
}
@@ -1265,6 +1265,24 @@ RegExpMirror.prototype.multiline = function() {
};
+/**
+ * Returns whether this regular expression has the sticky (y) flag set.
+ * @return {boolean} Value of the sticky flag
+ */
+RegExpMirror.prototype.sticky = function() {
+ return this.value_.sticky;
+};
+
+
+/**
+ * Returns whether this regular expression has the unicode (u) flag set.
+ * @return {boolean} Value of the unicode flag
+ */
+RegExpMirror.prototype.unicode = function() {
+ return this.value_.unicode;
+};
+
+
RegExpMirror.prototype.toText = function() {
// Simpel to text which is used when on specialization in subclass.
return "/" + this.source() + "/";
@@ -1641,7 +1659,7 @@ PropertyMirror.prototype.setter = function() {
*/
PropertyMirror.prototype.isNative = function() {
return this.is_interceptor_ ||
- ((this.propertyType() == PropertyType.Callbacks) &&
+ ((this.propertyType() == PropertyType.AccessorConstant) &&
!this.hasGetter() && !this.hasSetter());
};
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index 988e7da7b3..bc18aebb69 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -111,6 +111,32 @@ class SnapshotWriter {
};
+char* GetExtraCode(char* filename) {
+ if (filename == NULL || strlen(filename) == 0) return NULL;
+ ::printf("Embedding extra script: %s\n", filename);
+ FILE* file = base::OS::FOpen(filename, "rb");
+ if (file == NULL) {
+ fprintf(stderr, "Failed to open '%s': errno %d\n", filename, errno);
+ exit(1);
+ }
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
+ if (read < 0) {
+ fprintf(stderr, "Failed to read '%s': errno %d\n", filename, errno);
+ exit(1);
+ }
+ i += read;
+ }
+ fclose(file);
+ return chars;
+}
+
+
int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -124,7 +150,7 @@ int main(int argc, char** argv) {
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (result > 0 || argc != 2 || i::FLAG_help) {
+ if (result > 0 || (argc != 2 && argc != 3) || i::FLAG_help) {
::printf("Usage: %s [flag] ... outfile\n", argv[0]);
i::FlagList::PrintHelp();
return !i::FLAG_help;
@@ -139,9 +165,11 @@ int main(int argc, char** argv) {
{
SnapshotWriter writer(argv[1]);
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
- StartupData blob = v8::V8::CreateSnapshotDataBlob();
+ char* extra_code = GetExtraCode(argc == 3 ? argv[2] : NULL);
+ StartupData blob = v8::V8::CreateSnapshotDataBlob(extra_code);
CHECK(blob.data);
writer.WriteSnapshot(blob);
+ delete[] extra_code;
delete[] blob.data;
}
diff --git a/deps/v8/src/modules.cc b/deps/v8/src/modules.cc
new file mode 100644
index 0000000000..eb01cf08e4
--- /dev/null
+++ b/deps/v8/src/modules.cc
@@ -0,0 +1,38 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/modules.h"
+
+#include "src/ast-value-factory.h"
+
+namespace v8 {
+namespace internal {
+
+// ---------------------------------------------------------------------------
+// Addition.
+
+void ModuleDescriptor::Add(const AstRawString* name, Zone* zone, bool* ok) {
+ void* key = const_cast<AstRawString*>(name);
+
+ ZoneHashMap** map = &exports_;
+ ZoneAllocationPolicy allocator(zone);
+
+ if (*map == nullptr) {
+ *map = new (zone->New(sizeof(ZoneHashMap)))
+ ZoneHashMap(ZoneHashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity, allocator);
+ }
+
+ ZoneHashMap::Entry* p =
+ (*map)->Lookup(key, name->hash(), !IsFrozen(), allocator);
+ if (p == nullptr || p->value != nullptr) {
+ *ok = false;
+ }
+
+ p->value = key;
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/modules.h b/deps/v8/src/modules.h
new file mode 100644
index 0000000000..ac04e47c4d
--- /dev/null
+++ b/deps/v8/src/modules.h
@@ -0,0 +1,99 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MODULES_H_
+#define V8_MODULES_H_
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+class AstRawString;
+
+
+class ModuleDescriptor : public ZoneObject {
+ public:
+ // ---------------------------------------------------------------------------
+ // Factory methods.
+
+ static ModuleDescriptor* New(Zone* zone) {
+ return new (zone) ModuleDescriptor();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Mutators.
+
+ // Add a name to the list of exports. If it already exists, or this descriptor
+ // is frozen, that's an error.
+ void Add(const AstRawString* name, Zone* zone, bool* ok);
+
+ // Do not allow any further refinements, directly or through unification.
+ void Freeze() { frozen_ = true; }
+
+ // Assign an index.
+ void Allocate(int index) {
+ DCHECK(IsFrozen() && index_ == -1);
+ index_ = index;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Accessors.
+
+ // Check whether this is closed (i.e. fully determined).
+ bool IsFrozen() { return frozen_; }
+
+ int Length() {
+ DCHECK(IsFrozen());
+ ZoneHashMap* exports = exports_;
+ return exports ? exports->occupancy() : 0;
+ }
+
+ // The context slot in the hosting script context pointing to this module.
+ int Index() {
+ DCHECK(IsFrozen());
+ return index_;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Iterators.
+
+ // Use like:
+ // for (auto it = descriptor->iterator(); !it.done(); it.Advance()) {
+ // ... it.name() ...
+ // }
+ class Iterator {
+ public:
+ bool done() const { return entry_ == NULL; }
+ const AstRawString* name() const {
+ DCHECK(!done());
+ return static_cast<const AstRawString*>(entry_->key);
+ }
+ void Advance() { entry_ = exports_->Next(entry_); }
+
+ private:
+ friend class ModuleDescriptor;
+ explicit Iterator(const ZoneHashMap* exports)
+ : exports_(exports), entry_(exports ? exports->Start() : NULL) {}
+
+ const ZoneHashMap* exports_;
+ ZoneHashMap::Entry* entry_;
+ };
+
+ Iterator iterator() const { return Iterator(this->exports_); }
+
+ // ---------------------------------------------------------------------------
+ // Implementation.
+ private:
+ bool frozen_;
+ ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
+ int index_;
+
+ ModuleDescriptor() : frozen_(false), exports_(NULL), index_(-1) {}
+};
+
+} } // namespace v8::internal
+
+#endif // V8_MODULES_H_
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index e990559bde..78a07c737e 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -207,7 +207,7 @@ void HeapObject::VerifyHeapPointer(Object* p) {
void Symbol::SymbolVerify() {
CHECK(IsSymbol());
CHECK(HasHashCode());
- CHECK_GT(Hash(), 0);
+ CHECK_GT(Hash(), 0u);
CHECK(name()->IsUndefined() || name()->IsString());
CHECK(flags()->IsSmi());
}
@@ -276,7 +276,7 @@ void JSObject::JSObjectVerify() {
}
DescriptorArray* descriptors = map()->instance_descriptors();
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
- if (descriptors->GetDetails(i).type() == FIELD) {
+ if (descriptors->GetDetails(i).type() == DATA) {
Representation r = descriptors->GetDetails(i).representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(index)) {
@@ -324,8 +324,9 @@ void Map::MapVerify() {
SLOW_DCHECK(transitions()->IsSortedNoDuplicates());
SLOW_DCHECK(transitions()->IsConsistentWithBackPointers(this));
}
- SLOW_DCHECK(!FLAG_unbox_double_fields ||
- layout_descriptor()->IsConsistentWithMap(this));
+ // TODO(ishell): turn it back to SLOW_DCHECK.
+ CHECK(!FLAG_unbox_double_fields ||
+ layout_descriptor()->IsConsistentWithMap(this));
}
@@ -389,11 +390,14 @@ void FixedArray::FixedArrayVerify() {
void FixedDoubleArray::FixedDoubleArrayVerify() {
for (int i = 0; i < length(); i++) {
if (!is_the_hole(i)) {
- double value = get_scalar(i);
- CHECK(!std::isnan(value) ||
- (bit_cast<uint64_t>(value) ==
- bit_cast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
- ((bit_cast<uint64_t>(value) & Double::kSignMask) != 0));
+ uint64_t value = get_representation(i);
+ uint64_t unexpected =
+ bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN()) &
+ V8_UINT64_C(0x7FF8000000000000);
+ // Create implementation specific sNaN by inverting relevant bit.
+ unexpected ^= V8_UINT64_C(0x0008000000000000);
+ CHECK((value & V8_UINT64_C(0x7FF8000000000000)) != unexpected ||
+ (value & V8_UINT64_C(0x0007FFFFFFFFFFFF)) == V8_UINT64_C(0));
}
}
}
@@ -567,7 +571,6 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
- CHECK_EQ(FAST_HOLEY_SMI_ELEMENTS, GetElementsKind());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -675,6 +678,7 @@ void Code::CodeVerify() {
void Code::VerifyEmbeddedObjectsDependency() {
if (!CanContainWeakObjects()) return;
+ WeakCell* cell = CachedWeakCell();
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
@@ -685,13 +689,13 @@ void Code::VerifyEmbeddedObjectsDependency() {
if (obj->IsMap()) {
Map* map = Map::cast(obj);
CHECK(map->dependent_code()->Contains(DependentCode::kWeakCodeGroup,
- this));
+ cell));
} else if (obj->IsJSObject()) {
- Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table();
- WeakHashTable* table = WeakHashTable::cast(raw_table);
- Handle<Object> key_obj(obj, isolate);
- CHECK(DependentCode::cast(table->Lookup(key_obj))->Contains(
- DependentCode::kWeakCodeGroup, this));
+ WeakHashTable* table =
+ GetIsolate()->heap()->weak_object_to_code_table();
+ Handle<HeapObject> key_obj(HeapObject::cast(obj), isolate);
+ CHECK(DependentCode::cast(table->Lookup(key_obj))
+ ->Contains(DependentCode::kWeakCodeGroup, cell));
}
}
}
@@ -894,19 +898,6 @@ void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
}
-void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorVerify() {
- CHECK(IsDeclaredAccessorDescriptor());
- VerifyPointer(serialized_data());
-}
-
-
-void DeclaredAccessorInfo::DeclaredAccessorInfoVerify() {
- CHECK(IsDeclaredAccessorInfo());
- AccessorInfoVerify();
- VerifyPointer(descriptor());
-}
-
-
void AccessorPair::AccessorPairVerify() {
CHECK(IsAccessorPair());
VerifyPointer(getter());
@@ -971,13 +962,6 @@ void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
}
-void SignatureInfo::SignatureInfoVerify() {
- CHECK(IsSignatureInfo());
- VerifyPointer(receiver());
- VerifyPointer(args());
-}
-
-
void TypeSwitchInfo::TypeSwitchInfoVerify() {
CHECK(IsTypeSwitchInfo());
VerifyPointer(types());
@@ -1188,37 +1172,16 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
}
-bool LayoutDescriptor::IsConsistentWithMap(Map* map) {
- if (FLAG_unbox_double_fields) {
- DescriptorArray* descriptors = map->instance_descriptors();
- int nof_descriptors = map->NumberOfOwnDescriptors();
- for (int i = 0; i < nof_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
- FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
- bool tagged_expected =
- !field_index.is_inobject() || !details.representation().IsDouble();
- for (int bit = 0; bit < details.field_width_in_words(); bit++) {
- bool tagged_actual = IsTagged(details.field_index() + bit);
- DCHECK_EQ(tagged_expected, tagged_actual);
- if (tagged_actual != tagged_expected) return false;
- }
- }
- }
- return true;
-}
-
-
bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
DCHECK(valid_entries == -1);
Name* prev_key = NULL;
- PropertyKind prev_kind = DATA;
+ PropertyKind prev_kind = kData;
PropertyAttributes prev_attributes = NONE;
uint32_t prev_hash = 0;
for (int i = 0; i < number_of_transitions(); i++) {
Name* key = GetSortedKey(i);
uint32_t hash = key->Hash();
- PropertyKind kind = DATA;
+ PropertyKind kind = kData;
PropertyAttributes attributes = NONE;
if (!IsSpecialTransition(key)) {
Map* target = GetTarget(i);
@@ -1258,20 +1221,44 @@ bool TransitionArray::IsConsistentWithBackPointers(Map* current_map) {
}
-void Code::VerifyEmbeddedObjectsInFullCode() {
- // Check that no context-specific object has been embedded.
+// Estimates if there is a path from the object to a context.
+// This function is not precise, and can return false even if
+// there is a path to a context.
+bool CanLeak(Object* obj, Heap* heap, bool skip_weak_cell) {
+ if (!obj->IsHeapObject()) return false;
+ if (obj->IsWeakCell()) {
+ if (skip_weak_cell) return false;
+ return CanLeak(WeakCell::cast(obj)->value(), heap, skip_weak_cell);
+ }
+ if (obj->IsCell()) {
+ return CanLeak(Cell::cast(obj)->value(), heap, skip_weak_cell);
+ }
+ if (obj->IsPropertyCell()) {
+ return CanLeak(PropertyCell::cast(obj)->value(), heap, skip_weak_cell);
+ }
+ if (obj->IsContext()) return true;
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
+ for (int i = 0; i < Heap::kStrongRootListLength; i++) {
+ if (map == heap->roots_array_start()[i]) return false;
+ }
+ return true;
+ }
+ return CanLeak(HeapObject::cast(obj)->map(), heap, skip_weak_cell);
+}
+
+
+void Code::VerifyEmbeddedObjects(VerifyMode mode) {
+ if (kind() == OPTIMIZED_FUNCTION) return;
Heap* heap = GetIsolate()->heap();
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ bool skip_weak_cell = (mode == kNoContextSpecificPointers) ? false : true;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
- Object* obj = it.rinfo()->target_object();
- if (obj->IsCell()) obj = Cell::cast(obj)->value();
- if (obj->IsPropertyCell()) obj = PropertyCell::cast(obj)->value();
- if (!obj->IsHeapObject()) continue;
- Map* map = obj->IsMap() ? Map::cast(obj) : HeapObject::cast(obj)->map();
- int i = 0;
- while (map != heap->roots_array_start()[i++]) {
- CHECK_LT(i, Heap::kStrongRootListLength);
- }
+ Object* target = it.rinfo()->rmode() == RelocInfo::CELL
+ ? it.rinfo()->target_cell()
+ : it.rinfo()->target_object();
+ CHECK(!CanLeak(target, heap, skip_weak_cell));
}
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 485560f293..926e1c7a73 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -58,7 +58,7 @@ PropertyDetails PropertyDetails::AsDeleted() const {
int PropertyDetails::field_width_in_words() const {
- DCHECK(type() == FIELD);
+ DCHECK(location() == kField);
if (!FLAG_unbox_double_fields) return 1;
if (kDoubleSize == kPointerSize) return 1;
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
@@ -161,9 +161,7 @@ bool Object::IsExternal() const {
}
-bool Object::IsAccessorInfo() const {
- return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
-}
+bool Object::IsAccessorInfo() const { return IsExecutableAccessorInfo(); }
bool Object::IsSmi() const {
@@ -1199,10 +1197,10 @@ MaybeHandle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return SetPropertyWithHandler(proxy, receiver, name, value, strict_mode);
+ return SetPropertyWithHandler(proxy, receiver, name, value, language_mode);
}
@@ -1325,6 +1323,12 @@ Maybe<bool> JSProxy::HasElementWithHandler(Handle<JSProxy> proxy,
#define WRITE_INT32_FIELD(p, offset, value) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_UINT64_FIELD(p, offset) \
+ (*reinterpret_cast<const uint64_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_UINT64_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_INT64_FIELD(p, offset) \
(*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))
@@ -1881,7 +1885,7 @@ Handle<String> Map::ExpectedTransitionKey(Handle<Map> map) {
int transition = TransitionArray::kSimpleTransitionIndex;
PropertyDetails details = transitions->GetTargetDetails(transition);
Name* name = transitions->GetKey(transition);
- if (details.type() != FIELD) return Handle<String>::null();
+ if (details.type() != DATA) return Handle<String>::null();
if (details.attributes() != NONE) return Handle<String>::null();
if (!name->IsString()) return Handle<String>::null();
return Handle<String>(String::cast(name));
@@ -1899,10 +1903,10 @@ Handle<Map> Map::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
DisallowHeapAllocation no_allocation;
if (!map->HasTransitionArray()) return Handle<Map>::null();
TransitionArray* transitions = map->transitions();
- int transition = transitions->Search(DATA, *key, NONE);
+ int transition = transitions->Search(kData, *key, NONE);
if (transition == TransitionArray::kNotFound) return Handle<Map>::null();
PropertyDetails details = transitions->GetTargetDetails(transition);
- if (details.type() != FIELD) return Handle<Map>::null();
+ if (details.type() != DATA) return Handle<Map>::null();
DCHECK_EQ(NONE, details.attributes());
return Handle<Map>(transitions->GetTarget(transition));
}
@@ -2188,6 +2192,14 @@ bool JSObject::HasFastProperties() {
}
+MaybeHandle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ LanguageMode language_mode) {
+ return JSObject::SetOwnElement(object, index, value, NONE, language_mode);
+}
+
+
bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
if (unused_property_fields() != 0) return false;
if (is_prototype_map()) return false;
@@ -2239,7 +2251,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
void Object::VerifyApiCallResultType() {
-#if ENABLE_EXTRA_CHECKS
+#if DEBUG
if (!(IsSmi() ||
IsString() ||
IsSymbol() ||
@@ -2251,7 +2263,7 @@ void Object::VerifyApiCallResultType() {
IsNull())) {
FATAL("API call returned invalid object");
}
-#endif // ENABLE_EXTRA_CHECKS
+#endif // DEBUG
}
@@ -2290,37 +2302,21 @@ void FixedArray::set(int index, Object* value) {
}
-inline bool FixedDoubleArray::is_the_hole_nan(double value) {
- return bit_cast<uint64_t, double>(value) == kHoleNanInt64;
-}
-
-
-inline double FixedDoubleArray::hole_nan_as_double() {
- return bit_cast<double, uint64_t>(kHoleNanInt64);
-}
-
-
-inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
- DCHECK(bit_cast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
- DCHECK((bit_cast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
- return base::OS::nan_value();
-}
-
-
double FixedDoubleArray::get_scalar(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
DCHECK(index >= 0 && index < this->length());
- double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
- DCHECK(!is_the_hole_nan(result));
- return result;
+ DCHECK(!is_the_hole(index));
+ return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
}
-int64_t FixedDoubleArray::get_representation(int index) {
+
+uint64_t FixedDoubleArray::get_representation(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
DCHECK(index >= 0 && index < this->length());
- return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
+ int offset = kHeaderSize + index * kDoubleSize;
+ return READ_UINT64_FIELD(this, offset);
}
@@ -2338,8 +2334,12 @@ void FixedDoubleArray::set(int index, double value) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double();
- WRITE_DOUBLE_FIELD(this, offset, value);
+ if (std::isnan(value)) {
+ WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
+ } else {
+ WRITE_DOUBLE_FIELD(this, offset, value);
+ }
+ DCHECK(!is_the_hole(index));
}
@@ -2347,13 +2347,12 @@ void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+ WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
}
bool FixedDoubleArray::is_the_hole(int index) {
- int offset = kHeaderSize + index * kDoubleSize;
- return is_the_hole_nan(READ_DOUBLE_FIELD(this, offset));
+ return get_representation(index) == kHoleNanInt64;
}
@@ -2960,7 +2959,7 @@ int LinearSearch(T* array, Name* name, int len, int valid_entries,
return T::kNotFound;
} else {
DCHECK(len >= valid_entries);
- DCHECK_EQ(NULL, out_insertion_index); // Not supported here.
+ DCHECK_NULL(out_insertion_index); // Not supported here.
for (int number = 0; number < valid_entries; number++) {
Name* entry = array->GetKey(number);
uint32_t current_hash = entry->Hash();
@@ -3027,25 +3026,6 @@ PropertyDetails Map::GetLastDescriptorDetails() {
}
-void Map::LookupDescriptor(JSObject* holder,
- Name* name,
- LookupResult* result) {
- DescriptorArray* descriptors = this->instance_descriptors();
- int number = descriptors->SearchWithCache(name, this);
- if (number == DescriptorArray::kNotFound) return result->NotFound();
- result->DescriptorResult(holder, descriptors->GetDetails(number), number);
-}
-
-
-void Map::LookupTransition(JSObject* holder, Name* name,
- PropertyAttributes attributes,
- LookupResult* result) {
- int transition_index = this->SearchTransition(DATA, name, attributes);
- if (transition_index == TransitionArray::kNotFound) return result->NotFound();
- result->TransitionResult(holder, this->GetTransition(transition_index));
-}
-
-
FixedArrayBase* Map::GetInitialElements() {
if (has_fast_smi_or_object_elements() ||
has_fast_double_elements()) {
@@ -3149,13 +3129,13 @@ PropertyType DescriptorArray::GetType(int descriptor_number) {
int DescriptorArray::GetFieldIndex(int descriptor_number) {
- DCHECK(GetDetails(descriptor_number).type() == FIELD);
+ DCHECK(GetDetails(descriptor_number).location() == kField);
return GetDetails(descriptor_number).field_index();
}
HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
- DCHECK(GetDetails(descriptor_number).type() == FIELD);
+ DCHECK(GetDetails(descriptor_number).location() == kField);
return HeapType::cast(GetValue(descriptor_number));
}
@@ -3166,13 +3146,13 @@ Object* DescriptorArray::GetConstant(int descriptor_number) {
Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
- DCHECK(GetType(descriptor_number) == CALLBACKS);
+ DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
return GetValue(descriptor_number);
}
AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
- DCHECK(GetType(descriptor_number) == CALLBACKS);
+ DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
}
@@ -3343,7 +3323,6 @@ CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(JSArray)
@@ -3402,6 +3381,12 @@ CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
+// static
+template <class Traits>
+STATIC_CONST_MEMBER_DEFINITION const InstanceType
+ FixedTypedArray<Traits>::kInstanceType;
+
+
template <class Traits>
FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
SLOW_DCHECK(object->IsHeapObject() &&
@@ -3452,6 +3437,39 @@ SMI_ACCESSORS(String, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
+FreeSpace* FreeSpace::next() {
+ DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ (!GetHeap()->deserialization_complete() && map() == NULL));
+ DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+ return reinterpret_cast<FreeSpace*>(
+ Memory::Address_at(address() + kNextOffset));
+}
+
+
+FreeSpace** FreeSpace::next_address() {
+ DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ (!GetHeap()->deserialization_complete() && map() == NULL));
+ DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+ return reinterpret_cast<FreeSpace**>(address() + kNextOffset);
+}
+
+
+void FreeSpace::set_next(FreeSpace* next) {
+ DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ (!GetHeap()->deserialization_complete() && map() == NULL));
+ DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+ base::NoBarrier_Store(
+ reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+ reinterpret_cast<base::AtomicWord>(next));
+}
+
+
+FreeSpace* FreeSpace::cast(HeapObject* o) {
+ SLOW_DCHECK(!o->GetHeap()->deserialization_complete() || o->IsFreeSpace());
+ return reinterpret_cast<FreeSpace*>(o);
+}
+
+
uint32_t Name::hash_field() {
return READ_UINT32_FIELD(this, kHashFieldOffset);
}
@@ -4235,11 +4253,13 @@ int32_t Int32ArrayTraits::defaultValue() { return 0; }
float Float32ArrayTraits::defaultValue() {
- return static_cast<float>(base::OS::nan_value());
+ return std::numeric_limits<float>::quiet_NaN();
}
-double Float64ArrayTraits::defaultValue() { return base::OS::nan_value(); }
+double Float64ArrayTraits::defaultValue() {
+ return std::numeric_limits<double>::quiet_NaN();
+}
template <class Traits>
@@ -4695,7 +4715,7 @@ bool Map::CanBeDeprecated() {
if (details.representation().IsSmi()) return true;
if (details.representation().IsDouble()) return true;
if (details.representation().IsHeapObject()) return true;
- if (details.type() == CONSTANT) return true;
+ if (details.type() == DATA_CONSTANT) return true;
}
return false;
}
@@ -4727,21 +4747,6 @@ void DependentCode::set_number_of_entries(DependencyGroup group, int value) {
}
-bool DependentCode::is_code_at(int i) {
- return get(kCodesStartIndex + i)->IsCode();
-}
-
-Code* DependentCode::code_at(int i) {
- return Code::cast(get(kCodesStartIndex + i));
-}
-
-
-CompilationInfo* DependentCode::compilation_info_at(int i) {
- return reinterpret_cast<CompilationInfo*>(
- Foreign::cast(get(kCodesStartIndex + i))->foreign_address());
-}
-
-
void DependentCode::set_object_at(int i, Object* object) {
set(kCodesStartIndex + i, object);
}
@@ -4752,11 +4757,6 @@ Object* DependentCode::object_at(int i) {
}
-Object** DependentCode::slot_at(int i) {
- return RawFieldOfElementAt(kCodesStartIndex + i);
-}
-
-
void DependentCode::clear_at(int i) {
set_undefined(kCodesStartIndex + i);
}
@@ -4850,20 +4850,33 @@ inline void Code::set_is_crankshafted(bool value) {
inline bool Code::is_turbofanned() {
- DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
return IsTurbofannedField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
inline void Code::set_is_turbofanned(bool value) {
- DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = IsTurbofannedField::update(previous, value);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
+inline bool Code::can_have_weak_objects() {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ return CanHaveWeakObjectsField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+inline void Code::set_can_have_weak_objects(bool value) {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = CanHaveWeakObjectsField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
bool Code::optimizable() {
DCHECK_EQ(FUNCTION, kind());
return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
@@ -5188,10 +5201,21 @@ bool Code::IsWeakObjectInOptimizedCode(Object* object) {
return Map::cast(object)->CanTransition() &&
FLAG_weak_embedded_maps_in_optimized_code;
}
- if (object->IsJSObject() ||
- (object->IsCell() && Cell::cast(object)->value()->IsJSObject())) {
+ if (object->IsCell()) {
+ object = Cell::cast(object)->value();
+ } else if (object->IsPropertyCell()) {
+ object = PropertyCell::cast(object)->value();
+ }
+ if (object->IsJSObject()) {
return FLAG_weak_embedded_objects_in_optimized_code;
}
+ if (object->IsFixedArray()) {
+ // Contexts of inlined functions are embedded in optimized code.
+ Map* map = HeapObject::cast(object)->map();
+ Heap* heap = map->GetHeap();
+ return FLAG_weak_embedded_objects_in_optimized_code &&
+ map == heap->function_context_map();
+ }
return false;
}
@@ -5261,8 +5285,16 @@ void Map::UpdateDescriptors(DescriptorArray* descriptors,
if (layout_descriptor()->IsSlowLayout()) {
set_layout_descriptor(layout_desc);
}
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(layout_descriptor()->IsConsistentWithMap(this));
+ CHECK(visitor_id() == StaticVisitorBase::GetVisitorId(this));
+ }
+#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
DCHECK(visitor_id() == StaticVisitorBase::GetVisitorId(this));
+#endif
}
}
@@ -5275,7 +5307,14 @@ void Map::InitializeDescriptors(DescriptorArray* descriptors,
if (FLAG_unbox_double_fields) {
set_layout_descriptor(layout_desc);
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(layout_descriptor()->IsConsistentWithMap(this));
+ }
+#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+#endif
set_visitor_id(StaticVisitorBase::GetVisitorId(this));
}
}
@@ -5315,7 +5354,7 @@ void Map::AppendDescriptor(Descriptor* desc) {
// it should never try to (otherwise, layout descriptor must be updated too).
#ifdef DEBUG
PropertyDetails details = desc->GetDetails();
- CHECK(details.type() != FIELD || !details.representation().IsDouble());
+ CHECK(details.type() != DATA || !details.representation().IsDouble());
#endif
}
@@ -5489,12 +5528,6 @@ ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
-ACCESSORS(DeclaredAccessorDescriptor, serialized_data, ByteArray,
- kSerializedDataOffset)
-
-ACCESSORS(DeclaredAccessorInfo, descriptor, DeclaredAccessorDescriptor,
- kDescriptorOffset)
-
ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
@@ -5517,6 +5550,7 @@ ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
SMI_ACCESSORS(InterceptorInfo, flags, kFlagsOffset)
BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
kCanInterceptSymbolsBit)
+BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
@@ -5548,9 +5582,6 @@ ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
kInternalFieldCountOffset)
-ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
-ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
-
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
@@ -5576,6 +5607,8 @@ ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
kEvalFrominstructionsOffsetOffset)
ACCESSORS_TO_SMI(Script, flags, kFlagsOffset)
+BOOL_ACCESSORS(Script, flags, is_embedder_debug_script,
+ kIsEmbedderDebugScriptBit)
BOOL_ACCESSORS(Script, flags, is_shared_cross_origin, kIsSharedCrossOriginBit)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
@@ -5637,6 +5670,7 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
kRemovePrototypeBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
kDoNotCacheBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, instantiated, kInstantiatedBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
@@ -5662,7 +5696,7 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
-SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
+SMI_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
@@ -5710,8 +5744,7 @@ SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- formal_parameter_count,
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
@@ -5764,17 +5797,22 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
}
-StrictMode SharedFunctionInfo::strict_mode() {
- return BooleanBit::get(compiler_hints(), kStrictModeFunction)
- ? STRICT : SLOPPY;
+LanguageMode SharedFunctionInfo::language_mode() {
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ return construct_language_mode(
+ BooleanBit::get(compiler_hints(), kStrictModeFunction),
+ BooleanBit::get(compiler_hints(), kStrongModeFunction));
}
-void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) {
- // We only allow mode transitions from sloppy to strict.
- DCHECK(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
+void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ // We only allow language mode transitions that set the same language mode
+ // again or go up in the chain:
+ DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
int hints = compiler_hints();
- hints = BooleanBit::set(hints, kStrictModeFunction, strict_mode == STRICT);
+ hints = BooleanBit::set(hints, kStrictModeFunction, is_strict(language_mode));
+ hints = BooleanBit::set(hints, kStrongModeFunction, is_strong(language_mode));
set_compiler_hints(hints);
}
@@ -5794,8 +5832,6 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_super_property,
kUsesSuperProperty)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_super_constructor_call,
- kUsesSuperConstructorCall)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
kInlineBuiltin)
@@ -5811,6 +5847,8 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
kIsConciseMethod)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_accessor_function,
+ kIsAccessorFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
kIsDefaultConstructor)
@@ -5836,7 +5874,7 @@ bool Script::HasValidSource() {
void SharedFunctionInfo::DontAdaptArguments() {
DCHECK(code()->kind() == Code::BUILTIN);
- set_formal_parameter_count(kDontAdaptArgumentsSentinel);
+ set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
}
@@ -5898,6 +5936,11 @@ bool SharedFunctionInfo::is_compiled() {
}
+bool SharedFunctionInfo::is_simple_parameter_list() {
+ return scope_info()->IsSimpleParameterList();
+}
+
+
bool SharedFunctionInfo::IsApiFunction() {
return function_data()->IsFunctionTemplateInfo();
}
@@ -6017,8 +6060,8 @@ bool JSFunction::IsFromExtensionScript() {
bool JSFunction::NeedsArgumentsAdaption() {
- return shared()->formal_parameter_count() !=
- SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ return shared()->internal_formal_parameter_count() !=
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel;
}
@@ -6171,6 +6214,11 @@ bool JSFunction::is_compiled() {
}
+bool JSFunction::is_simple_parameter_list() {
+ return shared()->is_simple_parameter_list();
+}
+
+
FixedArray* JSFunction::literals() {
DCHECK(!shared()->bound());
return literals_or_bindings();
@@ -6194,7 +6242,7 @@ void JSFunction::set_function_bindings(FixedArray* bindings) {
// Bound function literal may be initialized to the empty fixed array
// before the bindings are set.
DCHECK(bindings == GetHeap()->empty_fixed_array() ||
- bindings->map() == GetHeap()->fixed_cow_array_map());
+ bindings->map() == GetHeap()->fixed_array_map());
set_literals_or_bindings(bindings);
}
@@ -7163,13 +7211,18 @@ Handle<ObjectHashTable> ObjectHashTable::Shrink(
template <int entrysize>
bool WeakHashTableShape<entrysize>::IsMatch(Handle<Object> key, Object* other) {
- return key->SameValue(other);
+ if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
+ return key->IsWeakCell() ? WeakCell::cast(*key)->value() == other
+ : *key == other;
}
template <int entrysize>
uint32_t WeakHashTableShape<entrysize>::Hash(Handle<Object> key) {
- intptr_t hash = reinterpret_cast<intptr_t>(*key);
+ intptr_t hash =
+ key->IsWeakCell()
+ ? reinterpret_cast<intptr_t>(WeakCell::cast(*key)->value())
+ : reinterpret_cast<intptr_t>(*key);
return (uint32_t)(hash & 0xFFFFFFFF);
}
@@ -7177,6 +7230,7 @@ uint32_t WeakHashTableShape<entrysize>::Hash(Handle<Object> key) {
template <int entrysize>
uint32_t WeakHashTableShape<entrysize>::HashForObject(Handle<Object> key,
Object* other) {
+ if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
intptr_t hash = reinterpret_cast<intptr_t>(other);
return (uint32_t)(hash & 0xFFFFFFFF);
}
@@ -7489,6 +7543,49 @@ Object* JSMapIterator::CurrentValue() {
}
+class String::SubStringRange::iterator FINAL {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef int difference_type;
+ typedef uc16 value_type;
+ typedef uc16* pointer;
+ typedef uc16& reference;
+
+ iterator(const iterator& other)
+ : content_(other.content_), offset_(other.offset_) {}
+
+ uc16 operator*() { return content_.Get(offset_); }
+ bool operator==(const iterator& other) const {
+ return content_.UsesSameString(other.content_) && offset_ == other.offset_;
+ }
+ bool operator!=(const iterator& other) const {
+ return !content_.UsesSameString(other.content_) || offset_ != other.offset_;
+ }
+ iterator& operator++() {
+ ++offset_;
+ return *this;
+ }
+ iterator operator++(int);
+
+ private:
+ friend class String;
+ iterator(String* from, int offset)
+ : content_(from->GetFlatContent()), offset_(offset) {}
+ String::FlatContent content_;
+ int offset_;
+};
+
+
+String::SubStringRange::iterator String::SubStringRange::begin() {
+ return String::SubStringRange::iterator(string_, first_);
+}
+
+
+String::SubStringRange::iterator String::SubStringRange::end() {
+ return String::SubStringRange::iterator(string_, first_ + length_);
+}
+
+
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 9805490c31..87f039628b 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -231,26 +231,27 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
descs->GetKey(i)->NamePrint(os);
os << ": ";
switch (descs->GetType(i)) {
- case FIELD: {
+ case DATA: {
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(index)) {
os << "<unboxed double> " << RawFastDoublePropertyAt(index);
} else {
os << Brief(RawFastPropertyAt(index));
}
- os << " (field at offset " << index.property_index() << ")\n";
+ os << " (data field at offset " << index.property_index() << ")\n";
break;
}
- case ACCESSOR_FIELD: {
+ case ACCESSOR: {
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
- os << " (accessor at offset " << index.property_index() << ")\n";
+ os << " (accessor field at offset " << index.property_index()
+ << ")\n";
break;
}
- case CONSTANT:
- os << Brief(descs->GetConstant(i)) << " (constant)\n";
+ case DATA_CONSTANT:
+ os << Brief(descs->GetConstant(i)) << " (data constant)\n";
break;
- case CALLBACKS:
- os << Brief(descs->GetCallbacksObject(i)) << " (callbacks)\n";
+ case ACCESSOR_CONSTANT:
+ os << Brief(descs->GetCallbacksObject(i)) << " (accessor constant)\n";
break;
}
}
@@ -423,6 +424,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (has_instance_call_handler()) os << " - instance_call_handler\n";
if (is_access_check_needed()) os << " - access_check_needed\n";
if (!is_extensible()) os << " - non-extensible\n";
+ if (is_observed()) os << " - observed\n";
os << " - back pointer: " << Brief(GetBackPointer());
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
@@ -863,24 +865,6 @@ void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(
}
-void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "DeclaredAccessorInfo");
- os << "\n - name: " << Brief(name());
- os << "\n - flag: " << Brief(flag());
- os << "\n - descriptor: " << Brief(descriptor());
- os << "\n";
-}
-
-
-void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "DeclaredAccessorDescriptor");
- os << "\n - internal field: " << Brief(serialized_data());
- os << "\n";
-}
-
-
void Box::BoxPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Box");
os << "\n - value: " << Brief(value());
@@ -944,6 +928,7 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n - hidden_prototype: " << (hidden_prototype() ? "true" : "false");
os << "\n - undetectable: " << (undetectable() ? "true" : "false");
os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false");
+ os << "\n - instantiated: " << (instantiated() ? "true" : "false");
os << "\n";
}
@@ -959,14 +944,6 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
}
-void SignatureInfo::SignatureInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "SignatureInfo");
- os << "\n - receiver: " << Brief(receiver());
- os << "\n - args: " << Brief(args());
- os << "\n";
-}
-
-
void TypeSwitchInfo::TypeSwitchInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TypeSwitchInfo");
os << "\n - types: " << Brief(types());
@@ -1195,11 +1172,11 @@ void TransitionArray::PrintTransitions(std::ostream& os,
} else {
PropertyDetails details = GetTargetDetails(key, target);
os << " (transition to ";
- if (details.location() == IN_DESCRIPTOR) {
+ if (details.location() == kDescriptor) {
os << "immutable ";
}
- os << (details.kind() == DATA ? "data" : "accessor");
- if (details.location() == IN_DESCRIPTOR) {
+ os << (details.kind() == kData ? "data" : "accessor");
+ if (details.location() == kDescriptor) {
os << " " << Brief(GetTargetValue(i));
}
os << "), attrs: " << details.attributes();
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 414306f2b4..0eda4912e6 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -234,151 +234,22 @@ bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
}
-template<typename To>
-static inline To* CheckedCast(void *from) {
- uintptr_t temp = reinterpret_cast<uintptr_t>(from);
- DCHECK(temp % sizeof(To) == 0);
- return reinterpret_cast<To*>(temp);
-}
-
-
-static Handle<Object> PerformCompare(const BitmaskCompareDescriptor& descriptor,
- char* ptr,
- Isolate* isolate) {
- uint32_t bitmask = descriptor.bitmask;
- uint32_t compare_value = descriptor.compare_value;
- uint32_t value;
- switch (descriptor.size) {
- case 1:
- value = static_cast<uint32_t>(*CheckedCast<uint8_t>(ptr));
- compare_value &= 0xff;
- bitmask &= 0xff;
- break;
- case 2:
- value = static_cast<uint32_t>(*CheckedCast<uint16_t>(ptr));
- compare_value &= 0xffff;
- bitmask &= 0xffff;
- break;
- case 4:
- value = *CheckedCast<uint32_t>(ptr);
- break;
- default:
- UNREACHABLE();
- return isolate->factory()->undefined_value();
- }
- return isolate->factory()->ToBoolean(
- (bitmask & value) == (bitmask & compare_value));
-}
-
-
-static Handle<Object> PerformCompare(const PointerCompareDescriptor& descriptor,
- char* ptr,
- Isolate* isolate) {
- uintptr_t compare_value =
- reinterpret_cast<uintptr_t>(descriptor.compare_value);
- uintptr_t value = *CheckedCast<uintptr_t>(ptr);
- return isolate->factory()->ToBoolean(compare_value == value);
-}
-
-
-static Handle<Object> GetPrimitiveValue(
- const PrimitiveValueDescriptor& descriptor,
- char* ptr,
- Isolate* isolate) {
- int32_t int32_value = 0;
- switch (descriptor.data_type) {
- case kDescriptorInt8Type:
- int32_value = *CheckedCast<int8_t>(ptr);
- break;
- case kDescriptorUint8Type:
- int32_value = *CheckedCast<uint8_t>(ptr);
- break;
- case kDescriptorInt16Type:
- int32_value = *CheckedCast<int16_t>(ptr);
- break;
- case kDescriptorUint16Type:
- int32_value = *CheckedCast<uint16_t>(ptr);
- break;
- case kDescriptorInt32Type:
- int32_value = *CheckedCast<int32_t>(ptr);
- break;
- case kDescriptorUint32Type: {
- uint32_t value = *CheckedCast<uint32_t>(ptr);
- AllowHeapAllocation allow_gc;
- return isolate->factory()->NewNumberFromUint(value);
- }
- case kDescriptorBoolType: {
- uint8_t byte = *CheckedCast<uint8_t>(ptr);
- return isolate->factory()->ToBoolean(
- byte & (0x1 << descriptor.bool_offset));
- }
- case kDescriptorFloatType: {
- float value = *CheckedCast<float>(ptr);
- AllowHeapAllocation allow_gc;
- return isolate->factory()->NewNumber(value);
- }
- case kDescriptorDoubleType: {
- double value = *CheckedCast<double>(ptr);
- AllowHeapAllocation allow_gc;
- return isolate->factory()->NewNumber(value);
- }
- }
- AllowHeapAllocation allow_gc;
- return isolate->factory()->NewNumberFromInt(int32_value);
-}
-
-
-static Handle<Object> GetDeclaredAccessorProperty(
- Handle<Object> receiver,
- Handle<DeclaredAccessorInfo> info,
- Isolate* isolate) {
- DisallowHeapAllocation no_gc;
- char* current = reinterpret_cast<char*>(*receiver);
- DeclaredAccessorDescriptorIterator iterator(info->descriptor());
- while (true) {
- const DeclaredAccessorDescriptorData* data = iterator.Next();
- switch (data->type) {
- case kDescriptorReturnObject: {
- DCHECK(iterator.Complete());
- current = *CheckedCast<char*>(current);
- return handle(*CheckedCast<Object*>(current), isolate);
- }
- case kDescriptorPointerDereference:
- DCHECK(!iterator.Complete());
- current = *reinterpret_cast<char**>(current);
- break;
- case kDescriptorPointerShift:
- DCHECK(!iterator.Complete());
- current += data->pointer_shift_descriptor.byte_offset;
- break;
- case kDescriptorObjectDereference: {
- DCHECK(!iterator.Complete());
- Object* object = CheckedCast<Object>(current);
- int field = data->object_dereference_descriptor.internal_field;
- Object* smi = JSObject::cast(object)->GetInternalField(field);
- DCHECK(smi->IsSmi());
- current = reinterpret_cast<char*>(smi);
- break;
- }
- case kDescriptorBitmaskCompare:
- DCHECK(iterator.Complete());
- return PerformCompare(data->bitmask_compare_descriptor,
- current,
- isolate);
- case kDescriptorPointerCompare:
- DCHECK(iterator.Complete());
- return PerformCompare(data->pointer_compare_descriptor,
- current,
- isolate);
- case kDescriptorPrimitiveValue:
- DCHECK(iterator.Complete());
- return GetPrimitiveValue(data->primitive_value_descriptor,
- current,
- isolate);
- }
+// TODO(dcarney): CallOptimization duplicates this logic, merge.
+Object* FunctionTemplateInfo::GetCompatibleReceiver(Isolate* isolate,
+ Object* receiver) {
+ // API calls are only supported with JSObject receivers.
+ if (!receiver->IsJSObject()) return isolate->heap()->null_value();
+ Object* recv_type = this->signature();
+ // No signature, return holder.
+ if (recv_type->IsUndefined()) return receiver;
+ FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
+ // Check the receiver.
+ for (PrototypeIterator iter(isolate, receiver,
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+ if (signature->IsTemplateFor(iter.GetCurrent())) return iter.GetCurrent();
}
- UNREACHABLE();
- return isolate->factory()->undefined_value();
+ return isolate->heap()->null_value();
}
@@ -420,18 +291,12 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
if (structure->IsAccessorInfo()) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
- Handle<Object> args[2] = { name, receiver };
+ Handle<Object> args[] = {name, receiver};
THROW_NEW_ERROR(isolate,
NewTypeError("incompatible_method_receiver",
HandleVector(args, arraysize(args))),
Object);
}
- if (structure->IsDeclaredAccessorInfo()) {
- return GetDeclaredAccessorProperty(
- receiver,
- Handle<DeclaredAccessorInfo>::cast(structure),
- isolate);
- }
Handle<ExecutableAccessorInfo> data =
Handle<ExecutableAccessorInfo>::cast(structure);
@@ -466,11 +331,10 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(Handle<Object> receiver,
}
-bool AccessorInfo::IsCompatibleReceiverType(Isolate* isolate,
- Handle<AccessorInfo> info,
- Handle<HeapType> type) {
+bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
+ Handle<AccessorInfo> info,
+ Handle<Map> map) {
if (!info->HasExpectedReceiverType()) return true;
- Handle<Map> map = IC::TypeToMap(*type, isolate);
if (!map->IsJSObjectMap()) return false;
return FunctionTemplateInfo::cast(info->expected_receiver_type())
->IsTemplateFor(*map);
@@ -479,7 +343,8 @@ bool AccessorInfo::IsCompatibleReceiverType(Isolate* isolate,
MaybeHandle<Object> Object::SetPropertyWithAccessor(
Handle<Object> receiver, Handle<Name> name, Handle<Object> value,
- Handle<JSObject> holder, Handle<Object> structure, StrictMode strict_mode) {
+ Handle<JSObject> holder, Handle<Object> structure,
+ LanguageMode language_mode) {
Isolate* isolate = name->GetIsolate();
// We should never get here to initialize a const with the hole
@@ -491,7 +356,7 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
// api style callbacks
ExecutableAccessorInfo* info = ExecutableAccessorInfo::cast(*structure);
if (!info->IsCompatibleReceiver(*receiver)) {
- Handle<Object> args[2] = { name, receiver };
+ Handle<Object> args[] = {name, receiver};
THROW_NEW_ERROR(isolate,
NewTypeError("incompatible_method_receiver",
HandleVector(args, arraysize(args))),
@@ -517,19 +382,15 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
return SetPropertyWithDefinedSetter(
receiver, Handle<JSReceiver>::cast(setter), value);
} else {
- if (strict_mode == SLOPPY) return value;
- Handle<Object> args[2] = { name, holder };
- THROW_NEW_ERROR(
- isolate, NewTypeError("no_setter_in_callback", HandleVector(args, 2)),
- Object);
+ if (is_sloppy(language_mode)) return value;
+ Handle<Object> args[] = {name, holder};
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("no_setter_in_callback",
+ HandleVector(args, arraysize(args))),
+ Object);
}
}
- // TODO(dcarney): Handle correctly.
- if (structure->IsDeclaredAccessorInfo()) {
- return value;
- }
-
UNREACHABLE();
return MaybeHandle<Object>();
}
@@ -572,12 +433,19 @@ MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
static bool FindAllCanReadHolder(LookupIterator* it) {
- for (; it->IsFound(); it->Next()) {
+ // Skip current iteration, it's in state ACCESS_CHECK or INTERCEPTOR, both of
+ // which have already been checked.
+ DCHECK(it->state() == LookupIterator::ACCESS_CHECK ||
+ it->state() == LookupIterator::INTERCEPTOR);
+ for (it->Next(); it->IsFound(); it->Next()) {
if (it->state() == LookupIterator::ACCESSOR) {
- Handle<Object> accessors = it->GetAccessors();
+ auto accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
}
+ } else if (it->state() == LookupIterator::INTERCEPTOR) {
+ auto holder = it->GetHolder<JSObject>();
+ if (holder->GetNamedInterceptor()->all_can_read()) return true;
}
}
return false;
@@ -587,10 +455,18 @@ static bool FindAllCanReadHolder(LookupIterator* it) {
MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
LookupIterator* it) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
- if (FindAllCanReadHolder(it)) {
- return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
- it->GetHolder<JSObject>(),
- it->GetAccessors());
+ while (FindAllCanReadHolder(it)) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
+ it->GetHolder<JSObject>(),
+ it->GetAccessors());
+ }
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ auto receiver = Handle<JSObject>::cast(it->GetReceiver());
+ auto result = GetPropertyWithInterceptor(it->GetHolder<JSObject>(),
+ receiver, it->name());
+ if (it->isolate()->has_scheduled_exception()) break;
+ if (!result.is_null()) return result;
}
it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_GET);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
@@ -601,8 +477,16 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
LookupIterator* it) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
- if (FindAllCanReadHolder(it))
- return maybe(it->property_details().attributes());
+ while (FindAllCanReadHolder(it)) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ return maybe(it->property_details().attributes());
+ }
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ auto result = GetPropertyAttributesWithInterceptor(
+ it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
+ if (it->isolate()->has_scheduled_exception()) break;
+ if (result.has_value && result.value != ABSENT) return result;
+ }
it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_HAS);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(),
Maybe<PropertyAttributes>());
@@ -624,12 +508,12 @@ static bool FindAllCanWriteHolder(LookupIterator* it) {
MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value, StrictMode strict_mode) {
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
if (FindAllCanWriteHolder(it)) {
return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
it->GetHolder<JSObject>(),
- it->GetAccessors(), strict_mode);
+ it->GetAccessors(), language_mode);
}
it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_SET);
@@ -689,50 +573,62 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
}
-Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name,
- DeleteMode mode) {
- DCHECK(!object->HasFastProperties());
- Isolate* isolate = object->GetIsolate();
- Handle<NameDictionary> dictionary(object->property_dictionary());
- int entry = dictionary->FindEntry(name);
- if (entry != NameDictionary::kNotFound) {
- // If we have a global object set the cell to the hole.
- if (object->IsGlobalObject()) {
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (!details.IsConfigurable()) {
- if (mode != FORCE_DELETION) return isolate->factory()->false_value();
- // When forced to delete global properties, we have to make a
- // map change to invalidate any ICs that think they can load
- // from the non-configurable cell without checking if it contains
- // the hole value.
- Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
- DCHECK(new_map->is_dictionary_map());
-#if TRACE_MAPS
- if (FLAG_trace_maps) {
- PrintF("[TraceMaps: GlobalDeleteNormalized from= %p to= %p ]\n",
- reinterpret_cast<void*>(object->map()),
- reinterpret_cast<void*>(*new_map));
- }
-#endif
- JSObject::MigrateToMap(object, new_map);
- }
- Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- Handle<Object> value = isolate->factory()->the_hole_value();
- PropertyCell::SetValueInferType(cell, value);
- dictionary->DetailsAtPut(entry, details.AsDeleted());
- } else {
- Handle<Object> deleted(
- NameDictionary::DeleteProperty(dictionary, entry, mode));
- if (*deleted == isolate->heap()->true_value()) {
- Handle<NameDictionary> new_properties =
- NameDictionary::Shrink(dictionary, name);
- object->set_properties(*new_properties);
- }
- return deleted;
- }
+static MaybeHandle<JSObject> FindIndexedAllCanReadHolder(
+ Isolate* isolate, Handle<JSObject> js_object,
+ PrototypeIterator::WhereToStart where_to_start) {
+ for (PrototypeIterator iter(isolate, js_object, where_to_start);
+ !iter.IsAtEnd(); iter.Advance()) {
+ auto curr = PrototypeIterator::GetCurrent(iter);
+ if (!curr->IsJSObject()) break;
+ auto obj = Handle<JSObject>::cast(curr);
+ if (!obj->HasIndexedInterceptor()) continue;
+ if (obj->GetIndexedInterceptor()->all_can_read()) return obj;
}
- return isolate->factory()->true_value();
+ return MaybeHandle<JSObject>();
+}
+
+
+MaybeHandle<Object> JSObject::GetElementWithFailedAccessCheck(
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> receiver,
+ uint32_t index) {
+ Handle<JSObject> holder = object;
+ PrototypeIterator::WhereToStart where_to_start =
+ PrototypeIterator::START_AT_RECEIVER;
+ while (true) {
+ auto all_can_read_holder =
+ FindIndexedAllCanReadHolder(isolate, holder, where_to_start);
+ if (!all_can_read_holder.ToHandle(&holder)) break;
+ auto result =
+ JSObject::GetElementWithInterceptor(holder, receiver, index, false);
+ if (isolate->has_scheduled_exception()) break;
+ if (!result.is_null()) return result;
+ where_to_start = PrototypeIterator::START_AT_PROTOTYPE;
+ }
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_GET);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
+}
+
+
+Maybe<PropertyAttributes> JSObject::GetElementAttributesWithFailedAccessCheck(
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> receiver,
+ uint32_t index) {
+ Handle<JSObject> holder = object;
+ PrototypeIterator::WhereToStart where_to_start =
+ PrototypeIterator::START_AT_RECEIVER;
+ while (true) {
+ auto all_can_read_holder =
+ FindIndexedAllCanReadHolder(isolate, holder, where_to_start);
+ if (!all_can_read_holder.ToHandle(&holder)) break;
+ auto result =
+ JSObject::GetElementAttributeFromInterceptor(object, receiver, index);
+ if (isolate->has_scheduled_exception()) break;
+ if (result.has_value && result.value != ABSENT) return result;
+ where_to_start = PrototypeIterator::START_AT_PROTOTYPE;
+ }
+ isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>());
+ return maybe(ABSENT);
}
@@ -768,14 +664,14 @@ MaybeHandle<Object> Object::GetElementWithReceiver(Isolate* isolate,
// Check access rights if needed.
if (js_object->IsAccessCheckNeeded()) {
if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
+ return JSObject::GetElementWithFailedAccessCheck(isolate, js_object,
+ receiver, index);
}
}
if (js_object->HasIndexedInterceptor()) {
- return JSObject::GetElementWithInterceptor(js_object, receiver, index);
+ return JSObject::GetElementWithInterceptor(js_object, receiver, index,
+ true);
}
if (js_object->elements() != isolate->heap()->empty_fixed_array()) {
@@ -794,7 +690,7 @@ MaybeHandle<Object> Object::GetElementWithReceiver(Isolate* isolate,
MaybeHandle<Object> Object::SetElementWithReceiver(
Isolate* isolate, Handle<Object> object, Handle<Object> receiver,
- uint32_t index, Handle<Object> value, StrictMode strict_mode) {
+ uint32_t index, Handle<Object> value, LanguageMode language_mode) {
// Iterate up the prototype chain until an element is found or the null
// prototype is encountered.
bool done = false;
@@ -828,7 +724,7 @@ MaybeHandle<Object> Object::SetElementWithReceiver(
if (!from_interceptor.has_value) return MaybeHandle<Object>();
if ((from_interceptor.value & READ_ONLY) != 0) {
return WriteToReadOnlyElement(isolate, receiver, index, value,
- strict_mode);
+ language_mode);
}
done = from_interceptor.value != ABSENT;
}
@@ -836,17 +732,15 @@ MaybeHandle<Object> Object::SetElementWithReceiver(
if (!done &&
js_object->elements() != isolate->heap()->empty_fixed_array()) {
ElementsAccessor* accessor = js_object->GetElementsAccessor();
- PropertyAttributes attrs =
- accessor->GetAttributes(receiver, js_object, index);
+ PropertyAttributes attrs = accessor->GetAttributes(js_object, index);
if ((attrs & READ_ONLY) != 0) {
return WriteToReadOnlyElement(isolate, receiver, index, value,
- strict_mode);
+ language_mode);
}
- Handle<AccessorPair> accessor_pair;
- if (accessor->GetAccessorPair(receiver, js_object, index)
- .ToHandle(&accessor_pair)) {
- return JSObject::SetElementWithCallback(receiver, accessor_pair, index,
- value, js_object, strict_mode);
+ Handle<AccessorPair> pair;
+ if (accessor->GetAccessorPair(js_object, index).ToHandle(&pair)) {
+ return JSObject::SetElementWithCallback(receiver, pair, index, value,
+ js_object, language_mode);
} else {
done = attrs != ABSENT;
}
@@ -854,17 +748,18 @@ MaybeHandle<Object> Object::SetElementWithReceiver(
}
if (!receiver->IsJSObject()) {
- return WriteToReadOnlyElement(isolate, receiver, index, value, strict_mode);
+ return WriteToReadOnlyElement(isolate, receiver, index, value,
+ language_mode);
}
Handle<JSObject> target = Handle<JSObject>::cast(receiver);
ElementsAccessor* accessor = target->GetElementsAccessor();
- PropertyAttributes attrs = accessor->GetAttributes(receiver, target, index);
- if ((attrs & READ_ONLY) != 0) {
- return WriteToReadOnlyElement(isolate, receiver, index, value, strict_mode);
+ PropertyAttributes attrs = accessor->GetAttributes(target, index);
+ if (attrs == ABSENT) {
+ return JSObject::SetElement(target, index, value, NONE, language_mode,
+ false);
}
- PropertyAttributes new_attrs = attrs != ABSENT ? attrs : NONE;
- return JSObject::SetElement(target, index, value, new_attrs, strict_mode,
- false);
+ return JSObject::SetElement(target, index, value, attrs, language_mode, false,
+ DEFINE_PROPERTY);
}
@@ -904,7 +799,9 @@ Object* Object::GetHash() {
// The object is either a number, a name, an odd-ball,
// a real JS object, or a Harmony proxy.
if (IsNumber()) {
- uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
+ uint32_t hash = std::isnan(Number())
+ ? Smi::kMaxValue
+ : ComputeLongHash(double_to_uint64(Number()));
return Smi::FromInt(hash & Smi::kMaxValue);
}
if (IsName()) {
@@ -1369,6 +1266,25 @@ void JSObject::PrintElementsTransition(
}
+void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
+ PropertyAttributes attributes) {
+ OFStream os(file);
+ os << "[reconfiguring ";
+ constructor_name()->PrintOn(file);
+ os << "] ";
+ Name* name = instance_descriptors()->GetKey(modify_index);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ os << "{symbol " << static_cast<void*>(name) << "}";
+ }
+ os << ": " << (kind == kData ? "kData" : "ACCESSORS") << ", attrs: ";
+ os << attributes << " [";
+ JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ os << "]\n";
+}
+
+
void Map::PrintGeneralization(FILE* file,
const char* reason,
int modify_index,
@@ -1425,8 +1341,8 @@ void JSObject::PrintInstanceMigration(FILE* file,
if (!o_r.Equals(n_r)) {
String::cast(o->GetKey(i))->PrintOn(file);
PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
- } else if (o->GetDetails(i).type() == CONSTANT &&
- n->GetDetails(i).type() == FIELD) {
+ } else if (o->GetDetails(i).type() == DATA_CONSTANT &&
+ n->GetDetails(i).type() == DATA) {
Name* name = o->GetKey(i);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
@@ -1804,7 +1720,7 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map,
type = HeapType::Any(isolate);
}
- FieldDescriptor new_field_desc(name, index, type, attributes, representation);
+ DataDescriptor new_field_desc(name, index, type, attributes, representation);
Handle<Map> new_map = Map::CopyAddDescriptor(map, &new_field_desc, flag);
int unused_property_fields = new_map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
@@ -1826,7 +1742,7 @@ MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
}
// Allocate new instance descriptors with (name, constant) added.
- ConstantDescriptor new_constant_desc(name, constant, attributes);
+ DataConstantDescriptor new_constant_desc(name, constant, attributes);
return Map::CopyAddDescriptor(map, &new_constant_desc, flag);
}
@@ -1847,16 +1763,14 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
- PropertyDetails details(attributes, FIELD, index);
+ PropertyDetails details(attributes, DATA, index);
dict->SetNextEnumerationIndex(index + 1);
dict->SetEntry(entry, name, cell, details);
return;
}
- Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value);
- PropertyCell::SetValueInferType(cell, value);
- value = cell;
+ value = isolate->factory()->NewPropertyCell(value);
}
- PropertyDetails details(attributes, FIELD, 0);
+ PropertyDetails details(attributes, DATA, 0);
Handle<NameDictionary> result =
NameDictionary::Add(dict, name, value, details);
if (*dict != *result) object->set_properties(*result);
@@ -2051,7 +1965,7 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
} else {
value = isolate->factory()->uninitialized_value();
}
- DCHECK(details.type() == FIELD);
+ DCHECK(details.type() == DATA);
int target_index = details.field_index() - inobject;
DCHECK(target_index >= 0); // Must be a backing store index.
new_storage->set(target_index, *value);
@@ -2077,18 +1991,21 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
for (int i = 0; i < old_nof; i++) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
+ if (details.type() != DATA) continue;
PropertyDetails old_details = old_descriptors->GetDetails(i);
- if (old_details.type() == CALLBACKS) {
- DCHECK(details.representation().IsTagged());
- continue;
- }
Representation old_representation = old_details.representation();
Representation representation = details.representation();
- DCHECK(old_details.type() == CONSTANT ||
- old_details.type() == FIELD);
Handle<Object> value;
- if (old_details.type() == CONSTANT) {
+ if (old_details.type() == ACCESSOR_CONSTANT) {
+ // In case of kAccessor -> kData property reconfiguration, the property
+ // must already be prepared for data or certain type.
+ DCHECK(!details.representation().IsNone());
+ if (details.representation().IsDouble()) {
+ value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ } else {
+ value = isolate->factory()->uninitialized_value();
+ }
+ } else if (old_details.type() == DATA_CONSTANT) {
value = handle(old_descriptors->GetValue(i), isolate);
DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
} else {
@@ -2119,7 +2036,7 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
for (int i = old_nof; i < new_nof; i++) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
+ if (details.type() != DATA) continue;
Handle<Object> value;
if (details.representation().IsDouble()) {
value = isolate->factory()->NewHeapNumber(0, MUTABLE);
@@ -2182,17 +2099,15 @@ int Map::NumberOfFields() {
DescriptorArray* descriptors = instance_descriptors();
int result = 0;
for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
- if (descriptors->GetDetails(i).type() == FIELD) result++;
+ if (descriptors->GetDetails(i).location() == kField) result++;
}
return result;
}
-Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
- int modify_index,
- StoreMode store_mode,
- PropertyAttributes attributes,
- const char* reason) {
+Handle<Map> Map::CopyGeneralizeAllRepresentations(
+ Handle<Map> map, int modify_index, StoreMode store_mode, PropertyKind kind,
+ PropertyAttributes attributes, const char* reason) {
Isolate* isolate = map->GetIsolate();
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -2201,7 +2116,7 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
for (int i = 0; i < number_of_own_descriptors; i++) {
descriptors->SetRepresentation(i, Representation::Tagged());
- if (descriptors->GetDetails(i).type() == FIELD) {
+ if (descriptors->GetDetails(i).type() == DATA) {
descriptors->SetValue(i, HeapType::Any());
}
}
@@ -2213,52 +2128,43 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
// Unless the instance is being migrated, ensure that modify_index is a field.
- PropertyDetails details = descriptors->GetDetails(modify_index);
- if (store_mode == FORCE_IN_OBJECT &&
- (details.type() != FIELD || details.attributes() != attributes)) {
- int field_index = details.type() == FIELD ? details.field_index()
- : new_map->NumberOfFields();
- FieldDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
- field_index, attributes, Representation::Tagged());
- descriptors->Replace(modify_index, &d);
- if (details.type() != FIELD) {
- int unused_property_fields = new_map->unused_property_fields() - 1;
- if (unused_property_fields < 0) {
- unused_property_fields += JSObject::kFieldsAdded;
+ if (modify_index >= 0) {
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ if (store_mode == FORCE_FIELD &&
+ (details.type() != DATA || details.attributes() != attributes)) {
+ int field_index = details.type() == DATA ? details.field_index()
+ : new_map->NumberOfFields();
+ DataDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
+ field_index, attributes, Representation::Tagged());
+ descriptors->Replace(modify_index, &d);
+ if (details.type() != DATA) {
+ int unused_property_fields = new_map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
+ new_map->set_unused_property_fields(unused_property_fields);
}
- new_map->set_unused_property_fields(unused_property_fields);
+ } else {
+ DCHECK(details.attributes() == attributes);
}
- } else {
- DCHECK(details.attributes() == attributes);
- }
- if (FLAG_trace_generalization) {
- HeapType* field_type = (details.type() == FIELD)
- ? map->instance_descriptors()->GetFieldType(modify_index)
- : NULL;
- map->PrintGeneralization(
- stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
- new_map->NumberOfOwnDescriptors(),
- details.type() == CONSTANT && store_mode == FORCE_IN_OBJECT,
- details.representation(), Representation::Tagged(), field_type,
- HeapType::Any());
+ if (FLAG_trace_generalization) {
+ HeapType* field_type =
+ (details.type() == DATA)
+ ? map->instance_descriptors()->GetFieldType(modify_index)
+ : NULL;
+ map->PrintGeneralization(
+ stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
+ new_map->NumberOfOwnDescriptors(),
+ details.type() == DATA_CONSTANT && store_mode == FORCE_FIELD,
+ details.representation(), Representation::Tagged(), field_type,
+ HeapType::Any());
+ }
}
return new_map;
}
-// static
-Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
- int modify_index,
- StoreMode store_mode,
- const char* reason) {
- PropertyDetails details =
- map->instance_descriptors()->GetDetails(modify_index);
- return CopyGeneralizeAllRepresentations(map, modify_index, store_mode,
- details.attributes(), reason);
-}
-
-
void Map::DeprecateTransitionTree() {
if (is_deprecated()) return;
if (HasTransitionArray()) {
@@ -2274,6 +2180,13 @@ void Map::DeprecateTransitionTree() {
}
+static inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
+ if (obj1 == obj2) return true; // Valid for both kData and kAccessor kinds.
+ // TODO(ishell): compare AccessorPairs.
+ return false;
+}
+
+
// Invalidates a transition target at |key|, and installs |new_descriptors| over
// the current instance_descriptors to ensure proper sharing of descriptor
// arrays.
@@ -2327,7 +2240,7 @@ Map* Map::FindLastMatchMap(int verbatim,
DisallowHeapAllocation no_allocation;
// This can only be called on roots of transition trees.
- DCHECK(GetBackPointer()->IsUndefined());
+ DCHECK_EQ(verbatim, NumberOfOwnDescriptors());
Map* current = this;
@@ -2344,16 +2257,22 @@ Map* Map::FindLastMatchMap(int verbatim,
DescriptorArray* next_descriptors = next->instance_descriptors();
PropertyDetails next_details = next_descriptors->GetDetails(i);
- if (details.type() != next_details.type()) break;
- if (details.attributes() != next_details.attributes()) break;
+ DCHECK_EQ(details.kind(), next_details.kind());
+ DCHECK_EQ(details.attributes(), next_details.attributes());
+ if (details.location() != next_details.location()) break;
if (!details.representation().Equals(next_details.representation())) break;
- if (next_details.type() == FIELD) {
- if (!descriptors->GetFieldType(i)->NowIs(
- next_descriptors->GetFieldType(i))) break;
+
+ if (next_details.location() == kField) {
+ HeapType* next_field_type = next_descriptors->GetFieldType(i);
+ if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
+ break;
+ }
} else {
- if (descriptors->GetValue(i) != next_descriptors->GetValue(i)) break;
+ if (!EqualImmutableValues(descriptors->GetValue(i),
+ next_descriptors->GetValue(i))) {
+ break;
+ }
}
-
current = next;
}
return current;
@@ -2362,7 +2281,7 @@ Map* Map::FindLastMatchMap(int verbatim,
Map* Map::FindFieldOwner(int descriptor) {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(FIELD, instance_descriptors()->GetDetails(descriptor).type());
+ DCHECK_EQ(DATA, instance_descriptors()->GetDetails(descriptor).type());
Map* result = this;
while (true) {
Object* back = result->GetBackPointer();
@@ -2380,7 +2299,7 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
Handle<HeapType> new_type) {
DisallowHeapAllocation no_allocation;
PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
- if (details.type() != FIELD) return;
+ if (details.type() != DATA) return;
if (HasTransitionArray()) {
TransitionArray* transitions = this->transitions();
for (int i = 0; i < transitions->number_of_transitions(); ++i) {
@@ -2394,8 +2313,8 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
// Skip if already updated the shared descriptor.
if (instance_descriptors()->GetFieldType(descriptor) == *new_type) return;
- FieldDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor),
- new_type, details.attributes(), new_representation);
+ DataDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor),
+ new_type, details.attributes(), new_representation);
instance_descriptors()->Replace(descriptor, &d);
}
@@ -2404,18 +2323,8 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
Handle<HeapType> Map::GeneralizeFieldType(Handle<HeapType> type1,
Handle<HeapType> type2,
Isolate* isolate) {
- static const int kMaxClassesPerFieldType = 5;
if (type1->NowIs(type2)) return type2;
if (type2->NowIs(type1)) return type1;
- if (type1->NowStable() && type2->NowStable()) {
- Handle<HeapType> type = HeapType::Union(type1, type2, isolate);
- if (type->NumClasses() <= kMaxClassesPerFieldType) {
- DCHECK(type->NowStable());
- DCHECK(type1->NowIs(type));
- DCHECK(type2->NowIs(type));
- return type;
- }
- }
return HeapType::Any(isolate);
}
@@ -2469,16 +2378,42 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
}
-// Generalize the representation of the descriptor at |modify_index|.
-// This method rewrites the transition tree to reflect the new change. To avoid
-// high degrees over polymorphism, and to stabilize quickly, on every rewrite
-// the new type is deduced by merging the current type with any potential new
-// (partial) version of the type in the transition tree.
+static inline Handle<HeapType> GetFieldType(Isolate* isolate,
+ Handle<DescriptorArray> descriptors,
+ int descriptor,
+ PropertyLocation location,
+ Representation representation) {
+#ifdef DEBUG
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(details.location(), location);
+#endif
+ if (location == kField) {
+ return handle(descriptors->GetFieldType(descriptor), isolate);
+ } else {
+ return descriptors->GetValue(descriptor)
+ ->OptimalType(isolate, representation);
+ }
+}
+
+
+// Reconfigures property at |modify_index| with |new_kind|, |new_attributes|,
+// |store_mode| and/or |new_representation|/|new_field_type|.
+// If |modify_index| is negative then no properties are reconfigured but the
+// map is migrated to the up-to-date non-deprecated state.
+//
+// This method rewrites or completes the transition tree to reflect the new
+// change. To avoid high degrees over polymorphism, and to stabilize quickly,
+// on every rewrite the new type is deduced by merging the current type with
+// any potential new (partial) version of the type in the transition tree.
// To do this, on each rewrite:
// - Search the root of the transition tree using FindRootMap.
-// - Find |target_map|, the newest matching version of this map using the keys
-// in the |old_map|'s descriptor array to walk the transition tree.
-// - Merge/generalize the descriptor array of the |old_map| and |target_map|.
+// - Find |target_map|, the newest matching version of this map using the
+// virtually "enhanced" |old_map|'s descriptor array (i.e. whose entry at
+// |modify_index| is considered to be of |new_kind| and having
+// |new_attributes|) to walk the transition tree.
+// - Merge/generalize the "enhanced" descriptor array of the |old_map| and
+// descriptor array of the |target_map|.
// - Generalize the |modify_index| descriptor using |new_representation| and
// |new_field_type|.
// - Walk the tree again starting from the root towards |target_map|. Stop at
@@ -2488,68 +2423,128 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
// Return it.
// - Otherwise, invalidate the outdated transition target from |target_map|, and
// replace its transition tree with a new branch for the updated descriptors.
-Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
- int modify_index,
- Representation new_representation,
- Handle<HeapType> new_field_type,
- StoreMode store_mode) {
+Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<HeapType> new_field_type,
+ StoreMode store_mode) {
+ DCHECK_NE(kAccessor, new_kind); // TODO(ishell): not supported yet.
+ DCHECK(store_mode != FORCE_FIELD || modify_index >= 0);
Isolate* isolate = old_map->GetIsolate();
Handle<DescriptorArray> old_descriptors(
old_map->instance_descriptors(), isolate);
int old_nof = old_map->NumberOfOwnDescriptors();
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- Representation old_representation = old_details.representation();
-
- // It's fine to transition from None to anything but double without any
- // modification to the object, because the default uninitialized value for
- // representation None can be overwritten by both smi and tagged values.
- // Doubles, however, would require a box allocation.
- if (old_representation.IsNone() && !new_representation.IsNone() &&
+
+ // If it's just a representation generalization case (i.e. property kind and
+ // attributes stays unchanged) it's fine to transition from None to anything
+ // but double without any modification to the object, because the default
+ // uninitialized value for representation None can be overwritten by both
+ // smi and tagged values. Doubles, however, would require a box allocation.
+ if (modify_index >= 0 && !new_representation.IsNone() &&
!new_representation.IsDouble()) {
- DCHECK(old_details.type() == FIELD);
- if (FLAG_trace_generalization) {
- old_map->PrintGeneralization(
- stdout, "uninitialized field",
- modify_index, old_map->NumberOfOwnDescriptors(),
- old_map->NumberOfOwnDescriptors(), false,
- old_representation, new_representation,
- old_descriptors->GetFieldType(modify_index), *new_field_type);
- }
- Handle<Map> field_owner(old_map->FindFieldOwner(modify_index), isolate);
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ Representation old_representation = old_details.representation();
+
+ if (old_representation.IsNone()) {
+ DCHECK_EQ(new_kind, old_details.kind());
+ DCHECK_EQ(new_attributes, old_details.attributes());
+ DCHECK_EQ(DATA, old_details.type());
+ if (FLAG_trace_generalization) {
+ old_map->PrintGeneralization(
+ stdout, "uninitialized field", modify_index,
+ old_map->NumberOfOwnDescriptors(),
+ old_map->NumberOfOwnDescriptors(), false, old_representation,
+ new_representation, old_descriptors->GetFieldType(modify_index),
+ *new_field_type);
+ }
+ Handle<Map> field_owner(old_map->FindFieldOwner(modify_index), isolate);
- GeneralizeFieldType(field_owner, modify_index, new_representation,
- new_field_type);
- DCHECK(old_descriptors->GetDetails(modify_index).representation().Equals(
- new_representation));
- DCHECK(old_descriptors->GetFieldType(modify_index)->NowIs(new_field_type));
- return old_map;
+ GeneralizeFieldType(field_owner, modify_index, new_representation,
+ new_field_type);
+ DCHECK(old_descriptors->GetDetails(modify_index)
+ .representation()
+ .Equals(new_representation));
+ DCHECK(
+ old_descriptors->GetFieldType(modify_index)->NowIs(new_field_type));
+ return old_map;
+ }
}
// Check the state of the root map.
Handle<Map> root_map(old_map->FindRootMap(), isolate);
if (!old_map->EquivalentToForTransition(*root_map)) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ new_kind, new_attributes,
"GenAll_NotEquivalent");
}
+
+ ElementsKind from_kind = root_map->elements_kind();
+ ElementsKind to_kind = old_map->elements_kind();
+ if (from_kind != to_kind &&
+ !(IsTransitionableFastElementsKind(from_kind) &&
+ IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ new_kind, new_attributes,
+ "GenAll_InvalidElementsTransition");
+ }
int root_nof = root_map->NumberOfOwnDescriptors();
- if (modify_index < root_nof) {
+ if (modify_index >= 0 && modify_index < root_nof) {
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- if ((old_details.type() != FIELD && store_mode == FORCE_IN_OBJECT) ||
- (old_details.type() == FIELD &&
+ if (old_details.kind() != new_kind ||
+ old_details.attributes() != new_attributes) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ new_kind, new_attributes,
+ "GenAll_RootModification1");
+ }
+ if ((old_details.type() != DATA && store_mode == FORCE_FIELD) ||
+ (old_details.type() == DATA &&
(!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
!new_representation.fits_into(old_details.representation())))) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
- "GenAll_RootModification");
+ new_kind, new_attributes,
+ "GenAll_RootModification2");
}
}
+ // From here on, use the map with correct elements kind as root map.
+ if (from_kind != to_kind) {
+ root_map = Map::AsElementsKind(root_map, to_kind);
+ }
+
Handle<Map> target_map = root_map;
for (int i = root_nof; i < old_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
- int j = target_map->SearchTransition(old_details.kind(),
- old_descriptors->GetKey(i),
- old_details.attributes());
+ PropertyKind next_kind;
+ PropertyLocation next_location;
+ PropertyAttributes next_attributes;
+ Representation next_representation;
+ bool property_kind_reconfiguration = false;
+
+ if (modify_index == i) {
+ DCHECK_EQ(FORCE_FIELD, store_mode);
+ property_kind_reconfiguration = old_details.kind() != new_kind;
+
+ next_kind = new_kind;
+ next_location = kField;
+ next_attributes = new_attributes;
+ // If property kind is not reconfigured merge the result with
+ // representation/field type from the old descriptor.
+ next_representation = new_representation;
+ if (!property_kind_reconfiguration) {
+ next_representation =
+ next_representation.generalize(old_details.representation());
+ }
+
+ } else {
+ next_kind = old_details.kind();
+ next_location = old_details.location();
+ next_attributes = old_details.attributes();
+ next_representation = old_details.representation();
+ }
+ int j = target_map->SearchTransition(next_kind, old_descriptors->GetKey(i),
+ next_attributes);
if (j == TransitionArray::kNotFound) break;
Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
Handle<DescriptorArray> tmp_descriptors = handle(
@@ -2557,42 +2552,48 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
// Check if target map is incompatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
- PropertyType old_type = old_details.type();
- PropertyType tmp_type = tmp_details.type();
- DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
- if ((tmp_type == CALLBACKS || old_type == CALLBACKS) &&
- (tmp_type != old_type ||
- tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
+ DCHECK_EQ(next_kind, tmp_details.kind());
+ DCHECK_EQ(next_attributes, tmp_details.attributes());
+ if (next_kind == kAccessor &&
+ !EqualImmutableValues(old_descriptors->GetValue(i),
+ tmp_descriptors->GetValue(i))) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ new_kind, new_attributes,
"GenAll_Incompatible");
}
- Representation old_representation = old_details.representation();
+ if (next_location == kField && tmp_details.location() == kDescriptor) break;
+
Representation tmp_representation = tmp_details.representation();
- if (!old_representation.fits_into(tmp_representation) ||
- (!new_representation.fits_into(tmp_representation) &&
- modify_index == i)) {
- break;
- }
- if (tmp_type == FIELD) {
- // Generalize the field type as necessary.
- Handle<HeapType> old_field_type = (old_type == FIELD)
- ? handle(old_descriptors->GetFieldType(i), isolate)
- : old_descriptors->GetValue(i)->OptimalType(
- isolate, tmp_representation);
- if (modify_index == i) {
- old_field_type = GeneralizeFieldType(
- new_field_type, old_field_type, isolate);
- }
- GeneralizeFieldType(tmp_map, i, tmp_representation, old_field_type);
- } else if (tmp_type == CONSTANT) {
- if (old_type != CONSTANT ||
- old_descriptors->GetConstant(i) != tmp_descriptors->GetConstant(i)) {
- break;
+ if (!next_representation.fits_into(tmp_representation)) break;
+
+ PropertyLocation old_location = old_details.location();
+ PropertyLocation tmp_location = tmp_details.location();
+ if (tmp_location == kField) {
+ if (next_kind == kData) {
+ Handle<HeapType> next_field_type;
+ if (modify_index == i) {
+ next_field_type = new_field_type;
+ if (!property_kind_reconfiguration) {
+ Handle<HeapType> old_field_type =
+ GetFieldType(isolate, old_descriptors, i,
+ old_details.location(), tmp_representation);
+ next_field_type =
+ GeneralizeFieldType(next_field_type, old_field_type, isolate);
+ }
+ } else {
+ Handle<HeapType> old_field_type =
+ GetFieldType(isolate, old_descriptors, i, old_details.location(),
+ tmp_representation);
+ next_field_type = old_field_type;
+ }
+ GeneralizeFieldType(tmp_map, i, tmp_representation, next_field_type);
}
- } else {
- DCHECK_EQ(tmp_type, old_type);
- DCHECK_EQ(tmp_descriptors->GetValue(i), old_descriptors->GetValue(i));
+ } else if (old_location == kField ||
+ !EqualImmutableValues(old_descriptors->GetValue(i),
+ tmp_descriptors->GetValue(i))) {
+ break;
}
+ DCHECK(!tmp_map->is_deprecated());
target_map = tmp_map;
}
@@ -2601,37 +2602,56 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
target_map->instance_descriptors(), isolate);
int target_nof = target_map->NumberOfOwnDescriptors();
if (target_nof == old_nof &&
- (store_mode != FORCE_IN_OBJECT ||
- target_descriptors->GetDetails(modify_index).type() == FIELD)) {
- DCHECK(modify_index < target_nof);
- DCHECK(new_representation.fits_into(
- target_descriptors->GetDetails(modify_index).representation()));
- DCHECK(target_descriptors->GetDetails(modify_index).type() != FIELD ||
- new_field_type->NowIs(
- target_descriptors->GetFieldType(modify_index)));
+ (store_mode != FORCE_FIELD ||
+ (modify_index >= 0 &&
+ target_descriptors->GetDetails(modify_index).location() == kField))) {
+#ifdef DEBUG
+ if (modify_index >= 0) {
+ PropertyDetails details = target_descriptors->GetDetails(modify_index);
+ DCHECK_EQ(new_kind, details.kind());
+ DCHECK_EQ(new_attributes, details.attributes());
+ DCHECK(new_representation.fits_into(details.representation()));
+ DCHECK(details.location() != kField ||
+ new_field_type->NowIs(
+ target_descriptors->GetFieldType(modify_index)));
+ }
+#endif
return target_map;
}
// Find the last compatible target map in the transition tree.
for (int i = target_nof; i < old_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
- int j = target_map->SearchTransition(old_details.kind(),
- old_descriptors->GetKey(i),
- old_details.attributes());
+ PropertyKind next_kind;
+ PropertyAttributes next_attributes;
+ if (modify_index == i) {
+ next_kind = new_kind;
+ next_attributes = new_attributes;
+ } else {
+ next_kind = old_details.kind();
+ next_attributes = old_details.attributes();
+ }
+ int j = target_map->SearchTransition(next_kind, old_descriptors->GetKey(i),
+ next_attributes);
if (j == TransitionArray::kNotFound) break;
Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
Handle<DescriptorArray> tmp_descriptors(
tmp_map->instance_descriptors(), isolate);
// Check if target map is compatible.
+#ifdef DEBUG
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
- DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
- if ((tmp_details.type() == CALLBACKS || old_details.type() == CALLBACKS) &&
- (tmp_details.type() != old_details.type() ||
- tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
+ DCHECK_EQ(next_kind, tmp_details.kind());
+ DCHECK_EQ(next_attributes, tmp_details.attributes());
+#endif
+ if (next_kind == kAccessor &&
+ !EqualImmutableValues(old_descriptors->GetValue(i),
+ tmp_descriptors->GetValue(i))) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ new_kind, new_attributes,
"GenAll_Incompatible");
}
+ DCHECK(!tmp_map->is_deprecated());
target_map = tmp_map;
}
target_nof = target_map->NumberOfOwnDescriptors();
@@ -2654,7 +2674,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
int current_offset = 0;
for (int i = 0; i < root_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
- if (old_details.type() == FIELD) {
+ if (old_details.location() == kField) {
current_offset += old_details.field_width_in_words();
}
Descriptor d(handle(old_descriptors->GetKey(i), isolate),
@@ -2668,41 +2688,85 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
Handle<Name> target_key(target_descriptors->GetKey(i), isolate);
PropertyDetails old_details = old_descriptors->GetDetails(i);
PropertyDetails target_details = target_descriptors->GetDetails(i);
- target_details = target_details.CopyWithRepresentation(
- old_details.representation().generalize(
- target_details.representation()));
+
+ PropertyKind next_kind;
+ PropertyAttributes next_attributes;
+ PropertyLocation next_location;
+ Representation next_representation;
+ bool property_kind_reconfiguration = false;
+
if (modify_index == i) {
- target_details = target_details.CopyWithRepresentation(
- new_representation.generalize(target_details.representation()));
- }
- DCHECK_EQ(old_details.attributes(), target_details.attributes());
- if (old_details.type() == FIELD || target_details.type() == FIELD ||
- (modify_index == i && store_mode == FORCE_IN_OBJECT) ||
- (target_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
- Handle<HeapType> old_field_type = (old_details.type() == FIELD)
- ? handle(old_descriptors->GetFieldType(i), isolate)
- : old_descriptors->GetValue(i)->OptimalType(
- isolate, target_details.representation());
- Handle<HeapType> target_field_type = (target_details.type() == FIELD)
- ? handle(target_descriptors->GetFieldType(i), isolate)
- : target_descriptors->GetValue(i)->OptimalType(
- isolate, target_details.representation());
- target_field_type = GeneralizeFieldType(
- target_field_type, old_field_type, isolate);
- if (modify_index == i) {
- target_field_type = GeneralizeFieldType(
- target_field_type, new_field_type, isolate);
+ DCHECK_EQ(FORCE_FIELD, store_mode);
+ property_kind_reconfiguration = old_details.kind() != new_kind;
+
+ next_kind = new_kind;
+ next_attributes = new_attributes;
+ next_location = kField;
+
+ // Merge new representation/field type with ones from the target
+ // descriptor. If property kind is not reconfigured merge the result with
+ // representation/field type from the old descriptor.
+ next_representation =
+ new_representation.generalize(target_details.representation());
+ if (!property_kind_reconfiguration) {
+ next_representation =
+ next_representation.generalize(old_details.representation());
+ }
+ } else {
+ // Merge old_descriptor and target_descriptor entries.
+ DCHECK_EQ(target_details.kind(), old_details.kind());
+ next_kind = target_details.kind();
+ next_attributes = target_details.attributes();
+ next_location =
+ old_details.location() == kField ||
+ target_details.location() == kField ||
+ !EqualImmutableValues(target_descriptors->GetValue(i),
+ old_descriptors->GetValue(i))
+ ? kField
+ : kDescriptor;
+
+ next_representation = old_details.representation().generalize(
+ target_details.representation());
+ }
+ DCHECK_EQ(next_kind, target_details.kind());
+ DCHECK_EQ(next_attributes, target_details.attributes());
+
+ if (next_location == kField) {
+ if (next_kind == kData) {
+ Handle<HeapType> target_field_type =
+ GetFieldType(isolate, target_descriptors, i,
+ target_details.location(), next_representation);
+
+ Handle<HeapType> next_field_type;
+ if (modify_index == i) {
+ next_field_type =
+ GeneralizeFieldType(target_field_type, new_field_type, isolate);
+ if (!property_kind_reconfiguration) {
+ Handle<HeapType> old_field_type =
+ GetFieldType(isolate, old_descriptors, i,
+ old_details.location(), next_representation);
+ next_field_type =
+ GeneralizeFieldType(next_field_type, old_field_type, isolate);
+ }
+ } else {
+ Handle<HeapType> old_field_type =
+ GetFieldType(isolate, old_descriptors, i, old_details.location(),
+ next_representation);
+ next_field_type =
+ GeneralizeFieldType(target_field_type, old_field_type, isolate);
+ }
+ DataDescriptor d(target_key, current_offset, next_field_type,
+ next_attributes, next_representation);
+ current_offset += d.GetDetails().field_width_in_words();
+ new_descriptors->Set(i, &d);
+ } else {
+ UNIMPLEMENTED(); // TODO(ishell): implement.
}
- FieldDescriptor d(target_key, current_offset, target_field_type,
- target_details.attributes(),
- target_details.representation());
- current_offset += d.GetDetails().field_width_in_words();
- new_descriptors->Set(i, &d);
} else {
- DCHECK_NE(FIELD, target_details.type());
- Descriptor d(target_key,
- handle(target_descriptors->GetValue(i), isolate),
- target_details);
+ PropertyDetails details(next_attributes, next_kind, next_location,
+ next_representation);
+ Descriptor d(target_key, handle(target_descriptors->GetValue(i), isolate),
+ details);
new_descriptors->Set(i, &d);
}
}
@@ -2711,46 +2775,74 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
for (int i = target_nof; i < old_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
Handle<Name> old_key(old_descriptors->GetKey(i), isolate);
+
+ // Merge old_descriptor entry and modified details together.
+ PropertyKind next_kind;
+ PropertyAttributes next_attributes;
+ PropertyLocation next_location;
+ Representation next_representation;
+ bool property_kind_reconfiguration = false;
+
if (modify_index == i) {
- old_details = old_details.CopyWithRepresentation(
- new_representation.generalize(old_details.representation()));
- }
- if (old_details.type() == FIELD) {
- Handle<HeapType> old_field_type(
- old_descriptors->GetFieldType(i), isolate);
- if (modify_index == i) {
- old_field_type = GeneralizeFieldType(
- old_field_type, new_field_type, isolate);
+ DCHECK_EQ(FORCE_FIELD, store_mode);
+ // In case of property kind reconfiguration it is not necessary to
+ // take into account representation/field type of the old descriptor.
+ property_kind_reconfiguration = old_details.kind() != new_kind;
+
+ next_kind = new_kind;
+ next_attributes = new_attributes;
+ next_location = kField;
+ next_representation = new_representation;
+ if (!property_kind_reconfiguration) {
+ next_representation =
+ next_representation.generalize(old_details.representation());
}
- FieldDescriptor d(old_key, current_offset, old_field_type,
- old_details.attributes(), old_details.representation());
- current_offset += d.GetDetails().field_width_in_words();
- new_descriptors->Set(i, &d);
} else {
- DCHECK(old_details.type() == CONSTANT || old_details.type() == CALLBACKS);
- if (modify_index == i && store_mode == FORCE_IN_OBJECT) {
- FieldDescriptor d(
- old_key, current_offset,
- GeneralizeFieldType(old_descriptors->GetValue(i)->OptimalType(
- isolate, old_details.representation()),
- new_field_type, isolate),
- old_details.attributes(), old_details.representation());
+ next_kind = old_details.kind();
+ next_attributes = old_details.attributes();
+ next_location = old_details.location();
+ next_representation = old_details.representation();
+ }
+
+ if (next_location == kField) {
+ if (next_kind == kData) {
+ Handle<HeapType> next_field_type;
+ if (modify_index == i) {
+ next_field_type = new_field_type;
+ if (!property_kind_reconfiguration) {
+ Handle<HeapType> old_field_type =
+ GetFieldType(isolate, old_descriptors, i,
+ old_details.location(), next_representation);
+ next_field_type =
+ GeneralizeFieldType(next_field_type, old_field_type, isolate);
+ }
+ } else {
+ Handle<HeapType> old_field_type =
+ GetFieldType(isolate, old_descriptors, i, old_details.location(),
+ next_representation);
+ next_field_type = old_field_type;
+ }
+
+ DataDescriptor d(old_key, current_offset, next_field_type,
+ next_attributes, next_representation);
current_offset += d.GetDetails().field_width_in_words();
new_descriptors->Set(i, &d);
} else {
- DCHECK_NE(FIELD, old_details.type());
- Descriptor d(old_key,
- handle(old_descriptors->GetValue(i), isolate),
- old_details);
- new_descriptors->Set(i, &d);
+ UNIMPLEMENTED(); // TODO(ishell): implement.
}
+ } else {
+ PropertyDetails details(next_attributes, next_kind, next_location,
+ next_representation);
+ Descriptor d(old_key, handle(old_descriptors->GetValue(i), isolate),
+ details);
+ new_descriptors->Set(i, &d);
}
}
new_descriptors->Sort();
- DCHECK(store_mode != FORCE_IN_OBJECT ||
- new_descriptors->GetDetails(modify_index).type() == FIELD);
+ DCHECK(store_mode != FORCE_FIELD ||
+ new_descriptors->GetDetails(modify_index).location() == kField);
Handle<Map> split_map(root_map->FindLastMatchMap(
root_nof, old_nof, *new_descriptors), isolate);
@@ -2759,34 +2851,48 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
Handle<LayoutDescriptor> new_layout_descriptor =
LayoutDescriptor::New(split_map, new_descriptors, old_nof);
- PropertyDetails split_prop_details = old_descriptors->GetDetails(split_nof);
+
+ PropertyKind split_kind;
+ PropertyAttributes split_attributes;
+ if (modify_index == split_nof) {
+ split_kind = new_kind;
+ split_attributes = new_attributes;
+ } else {
+ PropertyDetails split_prop_details = old_descriptors->GetDetails(split_nof);
+ split_kind = split_prop_details.kind();
+ split_attributes = split_prop_details.attributes();
+ }
bool transition_target_deprecated = split_map->DeprecateTarget(
- split_prop_details.kind(), old_descriptors->GetKey(split_nof),
- split_prop_details.attributes(), *new_descriptors,
- *new_layout_descriptor);
+ split_kind, old_descriptors->GetKey(split_nof), split_attributes,
+ *new_descriptors, *new_layout_descriptor);
// If |transition_target_deprecated| is true then the transition array
// already contains entry for given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
if (!transition_target_deprecated && !split_map->CanHaveMoreTransitions()) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ new_kind, new_attributes,
"GenAll_CantHaveMoreTransitions");
}
- if (FLAG_trace_generalization) {
+ if (FLAG_trace_generalization && modify_index >= 0) {
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
PropertyDetails new_details = new_descriptors->GetDetails(modify_index);
- Handle<HeapType> old_field_type = (old_details.type() == FIELD)
- ? handle(old_descriptors->GetFieldType(modify_index), isolate)
- : HeapType::Constant(handle(old_descriptors->GetValue(modify_index),
- isolate), isolate);
- Handle<HeapType> new_field_type = (new_details.type() == FIELD)
- ? handle(new_descriptors->GetFieldType(modify_index), isolate)
- : HeapType::Constant(handle(new_descriptors->GetValue(modify_index),
- isolate), isolate);
+ Handle<HeapType> old_field_type =
+ (old_details.type() == DATA)
+ ? handle(old_descriptors->GetFieldType(modify_index), isolate)
+ : HeapType::Constant(
+ handle(old_descriptors->GetValue(modify_index), isolate),
+ isolate);
+ Handle<HeapType> new_field_type =
+ (new_details.type() == DATA)
+ ? handle(new_descriptors->GetFieldType(modify_index), isolate)
+ : HeapType::Constant(
+ handle(new_descriptors->GetValue(modify_index), isolate),
+ isolate);
old_map->PrintGeneralization(
stdout, "", modify_index, split_nof, old_nof,
- old_details.type() == CONSTANT && store_mode == FORCE_IN_OBJECT,
+ old_details.location() == kDescriptor && store_mode == FORCE_FIELD,
old_details.representation(), new_details.representation(),
*old_field_type, *new_field_type);
}
@@ -2802,15 +2908,16 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
}
-// Generalize the representation of all FIELD descriptors.
+// Generalize the representation of all DATA descriptors.
Handle<Map> Map::GeneralizeAllFieldRepresentations(
Handle<Map> map) {
Handle<DescriptorArray> descriptors(map->instance_descriptors());
for (int i = 0; i < map->NumberOfOwnDescriptors(); ++i) {
- if (descriptors->GetDetails(i).type() == FIELD) {
- map = GeneralizeRepresentation(map, i, Representation::Tagged(),
- HeapType::Any(map->GetIsolate()),
- FORCE_IN_OBJECT);
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() == DATA) {
+ map = ReconfigureProperty(map, i, kData, details.attributes(),
+ Representation::Tagged(),
+ HeapType::Any(map->GetIsolate()), FORCE_FIELD);
}
}
return map;
@@ -2834,9 +2941,9 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> map) {
// static
Handle<Map> Map::Update(Handle<Map> map) {
if (!map->is_deprecated()) return map;
- return GeneralizeRepresentation(map, 0, Representation::None(),
- HeapType::None(map->GetIsolate()),
- ALLOW_IN_DESCRIPTOR);
+ return ReconfigureProperty(map, -1, kData, NONE, Representation::None(),
+ HeapType::None(map->GetIsolate()),
+ ALLOW_IN_DESCRIPTOR);
}
@@ -2874,27 +2981,27 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
Object* new_value = new_descriptors->GetValue(i);
Object* old_value = old_descriptors->GetValue(i);
switch (new_details.type()) {
- case FIELD: {
+ case DATA: {
PropertyType old_type = old_details.type();
- if (old_type == FIELD) {
+ if (old_type == DATA) {
if (!HeapType::cast(old_value)->NowIs(HeapType::cast(new_value))) {
return MaybeHandle<Map>();
}
} else {
- DCHECK(old_type == CONSTANT);
+ DCHECK(old_type == DATA_CONSTANT);
if (!HeapType::cast(new_value)->NowContains(old_value)) {
return MaybeHandle<Map>();
}
}
break;
}
- case ACCESSOR_FIELD:
+ case ACCESSOR:
DCHECK(HeapType::Any()->Is(HeapType::cast(new_value)));
break;
- case CONSTANT:
- case CALLBACKS:
- if (old_details.location() == IN_OBJECT || old_value != new_value) {
+ case DATA_CONSTANT:
+ case ACCESSOR_CONSTANT:
+ if (old_details.location() == kField || old_value != new_value) {
return MaybeHandle<Map>();
}
break;
@@ -2933,22 +3040,24 @@ MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
Handle<Name> name, Handle<Object> value,
- StrictMode strict_mode,
+ LanguageMode language_mode,
StoreFromKeyed store_mode) {
LookupIterator it(object, name);
- return SetProperty(&it, value, strict_mode, store_mode);
+ return SetProperty(&it, value, language_mode, store_mode);
}
-MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
- Handle<Object> value,
- StrictMode strict_mode,
- StoreFromKeyed store_mode,
- StorePropertyMode data_store_mode) {
+MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode,
+ bool* found) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc(it->isolate());
+ *found = true;
+
bool done = false;
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -2961,20 +3070,20 @@ MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
// until we find the property.
if (it->HasAccess(v8::ACCESS_SET)) break;
return JSObject::SetPropertyWithFailedAccessCheck(it, value,
- strict_mode);
+ language_mode);
case LookupIterator::JSPROXY:
if (it->HolderIsReceiverOrHiddenPrototype()) {
return JSProxy::SetPropertyWithHandler(it->GetHolder<JSProxy>(),
it->GetReceiver(), it->name(),
- value, strict_mode);
+ value, language_mode);
} else {
// TODO(verwaest): Use the MaybeHandle to indicate result.
bool has_result = false;
MaybeHandle<Object> maybe_result =
JSProxy::SetPropertyViaPrototypesWithHandler(
it->GetHolder<JSProxy>(), it->GetReceiver(), it->name(),
- value, strict_mode, &has_result);
+ value, language_mode, &has_result);
if (has_result) return maybe_result;
done = true;
}
@@ -2993,27 +3102,22 @@ MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
if (!maybe_attributes.has_value) return MaybeHandle<Object>();
done = maybe_attributes.value != ABSENT;
if (done && (maybe_attributes.value & READ_ONLY) != 0) {
- return WriteToReadOnlyProperty(it, value, strict_mode);
+ return WriteToReadOnlyProperty(it, value, language_mode);
}
}
break;
case LookupIterator::ACCESSOR:
if (it->property_details().IsReadOnly()) {
- return WriteToReadOnlyProperty(it, value, strict_mode);
- }
- if (it->HolderIsReceiverOrHiddenPrototype() ||
- !it->GetAccessors()->IsDeclaredAccessorInfo()) {
- return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
- it->GetHolder<JSObject>(),
- it->GetAccessors(), strict_mode);
+ return WriteToReadOnlyProperty(it, value, language_mode);
}
- done = true;
- break;
+ return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
+ it->GetHolder<JSObject>(),
+ it->GetAccessors(), language_mode);
case LookupIterator::DATA:
if (it->property_details().IsReadOnly()) {
- return WriteToReadOnlyProperty(it, value, strict_mode);
+ return WriteToReadOnlyProperty(it, value, language_mode);
}
if (it->HolderIsReceiverOrHiddenPrototype()) {
return SetDataProperty(it, value);
@@ -3032,34 +3136,103 @@ MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
// If the receiver is the JSGlobalObject, the store was contextual. In case
// the property did not exist yet on the global object itself, we have to
// throw a reference error in strict mode.
- if (it->GetReceiver()->IsJSGlobalObject() && strict_mode == STRICT) {
- Handle<Object> args[1] = {it->name()};
- THROW_NEW_ERROR(it->isolate(),
- NewReferenceError("not_defined", HandleVector(args, 1)),
- Object);
+ if (it->GetReceiver()->IsJSGlobalObject() && is_strict(language_mode)) {
+ Handle<Object> args[] = {it->name()};
+ THROW_NEW_ERROR(
+ it->isolate(),
+ NewReferenceError("not_defined", HandleVector(args, arraysize(args))),
+ Object);
}
- if (data_store_mode == SUPER_PROPERTY) {
- LookupIterator own_lookup(it->GetReceiver(), it->name(),
- LookupIterator::OWN);
+ *found = false;
+ return MaybeHandle<Object>();
+}
+
+
+MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
+ bool found = false;
+ MaybeHandle<Object> result =
+ SetPropertyInternal(it, value, language_mode, store_mode, &found);
+ if (found) return result;
+ return AddDataProperty(it, value, NONE, language_mode, store_mode);
+}
+
+
+MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
+ bool found = false;
+ MaybeHandle<Object> result =
+ SetPropertyInternal(it, value, language_mode, store_mode, &found);
+ if (found) return result;
+
+ LookupIterator own_lookup(it->GetReceiver(), it->name(), LookupIterator::OWN);
+
+ switch (own_lookup.state()) {
+ case LookupIterator::NOT_FOUND:
+ return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
+ store_mode);
+
+ case LookupIterator::DATA: {
+ PropertyDetails details = own_lookup.property_details();
+ if (details.IsConfigurable() || !details.IsReadOnly()) {
+ return JSObject::SetOwnPropertyIgnoreAttributes(
+ Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
+ details.attributes());
+ }
+ return WriteToReadOnlyProperty(&own_lookup, value, language_mode);
+ }
- return JSObject::SetProperty(&own_lookup, value, strict_mode, store_mode,
- NORMAL_PROPERTY);
+ case LookupIterator::ACCESSOR: {
+ PropertyDetails details = own_lookup.property_details();
+ if (details.IsConfigurable()) {
+ return JSObject::SetOwnPropertyIgnoreAttributes(
+ Handle<JSObject>::cast(it->GetReceiver()), it->name(), value,
+ details.attributes());
+ }
+
+ return RedefineNonconfigurableProperty(it->isolate(), it->name(), value,
+ language_mode);
+ }
+
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ break;
+
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::ACCESS_CHECK: {
+ bool found = false;
+ MaybeHandle<Object> result = SetPropertyInternal(
+ &own_lookup, value, language_mode, store_mode, &found);
+ if (found) return result;
+ return SetDataProperty(&own_lookup, value);
+ }
}
- return AddDataProperty(it, value, NONE, strict_mode, store_mode);
+ UNREACHABLE();
+ return MaybeHandle<Object>();
}
-MaybeHandle<Object> Object::WriteToReadOnlyProperty(LookupIterator* it,
- Handle<Object> value,
- StrictMode strict_mode) {
- if (strict_mode != STRICT) return value;
+MaybeHandle<Object> Object::WriteToReadOnlyProperty(
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
+ return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(), it->name(),
+ value, language_mode);
+}
+
- Handle<Object> args[] = {it->name(), it->GetReceiver()};
- THROW_NEW_ERROR(it->isolate(),
- NewTypeError("strict_read_only_property",
- HandleVector(args, arraysize(args))),
+MaybeHandle<Object> Object::WriteToReadOnlyProperty(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
+ Handle<Object> value, LanguageMode language_mode) {
+ if (is_sloppy(language_mode)) return value;
+ Handle<Object> args[] = {name, receiver};
+ THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+ HandleVector(args, arraysize(args))),
Object);
}
@@ -3068,12 +3241,19 @@ MaybeHandle<Object> Object::WriteToReadOnlyElement(Isolate* isolate,
Handle<Object> receiver,
uint32_t index,
Handle<Object> value,
- StrictMode strict_mode) {
- if (strict_mode != STRICT) return value;
+ LanguageMode language_mode) {
+ return WriteToReadOnlyProperty(isolate, receiver,
+ isolate->factory()->NewNumberFromUint(index),
+ value, language_mode);
+}
- Handle<Object> args[] = {isolate->factory()->NewNumberFromUint(index),
- receiver};
- THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+
+MaybeHandle<Object> Object::RedefineNonconfigurableProperty(
+ Isolate* isolate, Handle<Object> name, Handle<Object> value,
+ LanguageMode language_mode) {
+ if (is_sloppy(language_mode)) return value;
+ Handle<Object> args[] = {name};
+ THROW_NEW_ERROR(isolate, NewTypeError("redefine_disallowed",
HandleVector(args, arraysize(args))),
Object);
}
@@ -3118,12 +3298,12 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
Handle<Object> value,
PropertyAttributes attributes,
- StrictMode strict_mode,
+ LanguageMode language_mode,
StoreFromKeyed store_mode) {
DCHECK(!it->GetReceiver()->IsJSProxy());
if (!it->GetReceiver()->IsJSObject()) {
// TODO(verwaest): Throw a TypeError with a more specific message.
- return WriteToReadOnlyProperty(it, value, strict_mode);
+ return WriteToReadOnlyProperty(it, value, language_mode);
}
Handle<JSObject> receiver = it->GetStoreTarget();
@@ -3140,9 +3320,9 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
// |value| under it->name() with |attributes|.
it->PrepareTransitionToDataProperty(value, attributes, store_mode);
if (it->state() != LookupIterator::TRANSITION) {
- if (strict_mode == SLOPPY) return value;
+ if (is_sloppy(language_mode)) return value;
- Handle<Object> args[1] = {it->name()};
+ Handle<Object> args[] = {it->name()};
THROW_NEW_ERROR(it->isolate(),
NewTypeError("object_not_extensible",
HandleVector(args, arraysize(args))),
@@ -3154,6 +3334,7 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
if (receiver->map()->is_dictionary_map()) {
// TODO(verwaest): Probably should ensure this is done beforehand.
it->InternalizeName();
+ // TODO(dcarney): just populate TransitionPropertyCell here?
JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
} else {
// Write the property value.
@@ -3174,34 +3355,42 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
MaybeHandle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- bool* found,
- StrictMode strict_mode) {
- Isolate *isolate = object->GetIsolate();
+ Handle<JSObject> object, uint32_t index, Handle<Object> value, bool* found,
+ LanguageMode language_mode) {
+ Isolate* isolate = object->GetIsolate();
for (PrototypeIterator iter(isolate, object); !iter.IsAtEnd();
iter.Advance()) {
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
return JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), object,
isolate->factory()->Uint32ToString(index), // name
- value, strict_mode, found);
+ value, language_mode, found);
}
Handle<JSObject> js_proto =
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+
+ if (js_proto->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(js_proto, index, v8::ACCESS_SET)) {
+ *found = true;
+ isolate->ReportFailedAccessCheck(js_proto, v8::ACCESS_SET);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return MaybeHandle<Object>();
+ }
+ }
+
if (!js_proto->HasDictionaryElements()) {
continue;
}
+
Handle<SeededNumberDictionary> dictionary(js_proto->element_dictionary());
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
+ if (details.type() == ACCESSOR_CONSTANT) {
*found = true;
Handle<Object> structure(dictionary->ValueAt(entry), isolate);
return SetElementWithCallback(object, structure, index, value, js_proto,
- strict_mode);
+ language_mode);
}
}
}
@@ -3304,7 +3493,7 @@ struct DescriptorArrayAppender {
int valid_descriptors,
Handle<DescriptorArray> array) {
DisallowHeapAllocation no_gc;
- CallbacksDescriptor desc(key, entry, entry->property_attributes());
+ AccessorConstantDescriptor desc(key, entry, entry->property_attributes());
array->Append(&desc);
}
};
@@ -3473,19 +3662,21 @@ static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
Handle<Map> current_map = map;
ElementsKind kind = map->elements_kind();
- if (!map->is_prototype_map()) {
+ TransitionFlag flag;
+ if (map->is_prototype_map()) {
+ flag = OMIT_TRANSITION;
+ } else {
+ flag = INSERT_TRANSITION;
while (kind != to_kind && !IsTerminalElementsKind(kind)) {
kind = GetNextTransitionElementsKind(kind);
- current_map =
- Map::CopyAsElementsKind(current_map, kind, INSERT_TRANSITION);
+ current_map = Map::CopyAsElementsKind(current_map, kind, flag);
}
}
// In case we are exiting the fast elements kind system, just add the map in
// the end.
if (kind != to_kind) {
- current_map = Map::CopyAsElementsKind(
- current_map, to_kind, INSERT_TRANSITION);
+ current_map = Map::CopyAsElementsKind(current_map, to_kind, flag);
}
DCHECK(current_map->elements_kind() == to_kind);
@@ -3582,11 +3773,9 @@ Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
}
-MaybeHandle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name,
- Handle<Object> value,
- StrictMode strict_mode) {
+MaybeHandle<Object> JSProxy::SetPropertyWithHandler(
+ Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
+ Handle<Object> value, LanguageMode language_mode) {
Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
@@ -3608,7 +3797,7 @@ MaybeHandle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, StrictMode strict_mode, bool* done) {
+ Handle<Object> value, LanguageMode language_mode, bool* done) {
Isolate* isolate = proxy->GetIsolate();
Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
@@ -3679,11 +3868,8 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
DCHECK(writable->IsBoolean());
*done = writable->IsFalse();
if (!*done) return isolate->factory()->the_hole_value();
- if (strict_mode == SLOPPY) return value;
- Handle<Object> args[] = { name, receiver };
- THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
- HandleVector(args, arraysize(args))),
- Object);
+ return WriteToReadOnlyProperty(isolate, receiver, name, value,
+ language_mode);
}
// We have an AccessorDescriptor.
@@ -3696,7 +3882,7 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == SLOPPY) return value;
+ if (is_sloppy(language_mode)) return value;
Handle<Object> args2[] = { name, proxy };
THROW_NEW_ERROR(isolate, NewTypeError("no_setter_in_callback",
HandleVector(args2, arraysize(args2))),
@@ -3705,7 +3891,7 @@ MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Name> name, DeleteMode mode) {
+ Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode) {
Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
@@ -3723,7 +3909,7 @@ MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
Object);
bool result_bool = result->BooleanValue();
- if (mode == STRICT_DELETION && !result_bool) {
+ if (is_strict(language_mode) && !result_bool) {
Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("delete"));
@@ -3737,10 +3923,10 @@ MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
MaybeHandle<Object> JSProxy::DeleteElementWithHandler(
- Handle<JSProxy> proxy, uint32_t index, DeleteMode mode) {
+ Handle<JSProxy> proxy, uint32_t index, LanguageMode language_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(proxy, name, mode);
+ return JSProxy::DeletePropertyWithHandler(proxy, name, language_mode);
}
@@ -3934,7 +4120,7 @@ void JSObject::WriteToField(int descriptor, Object* value) {
DescriptorArray* desc = map()->instance_descriptors();
PropertyDetails details = desc->GetDetails(descriptor);
- DCHECK(details.type() == FIELD);
+ DCHECK(details.type() == DATA);
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
@@ -4043,7 +4229,6 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
}
it.ReconfigureDataProperty(value, attributes);
- it.PrepareForDataProperty(value);
value = it.WriteDataValue(value);
if (is_observed) {
@@ -4068,7 +4253,6 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
if (is_observed) old_value = it.GetDataValue();
it.ReconfigureDataProperty(value, attributes);
- it.PrepareForDataProperty(value);
value = it.WriteDataValue(value);
if (is_observed) {
@@ -4184,9 +4368,8 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeWithReceiver(
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>());
- return maybe(ABSENT);
+ return GetElementAttributesWithFailedAccessCheck(isolate, object,
+ receiver, index);
}
}
@@ -4263,8 +4446,8 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeFromInterceptor(
Maybe<PropertyAttributes> JSObject::GetElementAttributeWithoutInterceptor(
Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index,
bool check_prototype) {
- PropertyAttributes attr = object->GetElementsAccessor()->GetAttributes(
- receiver, object, index);
+ PropertyAttributes attr =
+ object->GetElementsAccessor()->GetAttributes(object, index);
if (attr != ABSENT) return maybe(attr);
// Handle [] on String objects.
@@ -4372,13 +4555,13 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(i));
switch (details.type()) {
- case CONSTANT: {
+ case DATA_CONSTANT: {
Handle<Object> value(descs->GetConstant(i), isolate);
- PropertyDetails d(details.attributes(), FIELD, i + 1);
+ PropertyDetails d(details.attributes(), DATA, i + 1);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
- case FIELD: {
+ case DATA: {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
Handle<Object> value;
if (object->IsUnboxedDoubleField(index)) {
@@ -4392,20 +4575,20 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
value = isolate->factory()->NewHeapNumber(old->value());
}
}
- PropertyDetails d(details.attributes(), FIELD, i + 1);
+ PropertyDetails d(details.attributes(), DATA, i + 1);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
- case ACCESSOR_FIELD: {
+ case ACCESSOR: {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
Handle<Object> value(object->RawFastPropertyAt(index), isolate);
- PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
- case CALLBACKS: {
+ case ACCESSOR_CONSTANT: {
Handle<Object> value(descs->GetCallbacksObject(i), isolate);
- PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
@@ -4489,7 +4672,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Object* value = dictionary->ValueAt(index);
PropertyType type = dictionary->DetailsAt(index).type();
- if (type == FIELD && !value->IsJSFunction()) {
+ if (type == DATA && !value->IsJSFunction()) {
number_of_fields += 1;
}
}
@@ -4558,9 +4741,10 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
PropertyType type = details.type();
if (value->IsJSFunction()) {
- ConstantDescriptor d(key, handle(value, isolate), details.attributes());
+ DataConstantDescriptor d(key, handle(value, isolate),
+ details.attributes());
descriptors->Set(enumeration_index - 1, &d);
- } else if (type == FIELD) {
+ } else if (type == DATA) {
if (current_offset < inobject_props) {
object->InObjectPropertyAtPut(current_offset, value,
UPDATE_WRITE_BARRIER);
@@ -4568,13 +4752,14 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int offset = current_offset - inobject_props;
fields->set(offset, value);
}
- FieldDescriptor d(key, current_offset, details.attributes(),
- // TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged());
+ DataDescriptor d(key, current_offset, details.attributes(),
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged());
current_offset += d.GetDetails().field_width_in_words();
descriptors->Set(enumeration_index - 1, &d);
- } else if (type == CALLBACKS) {
- CallbacksDescriptor d(key, handle(value, isolate), details.attributes());
+ } else if (type == ACCESSOR_CONSTANT) {
+ AccessorConstantDescriptor d(key, handle(value, isolate),
+ details.attributes());
descriptors->Set(enumeration_index - 1, &d);
} else {
UNREACHABLE();
@@ -4636,7 +4821,7 @@ static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
}
if (!value->IsTheHole()) {
- PropertyDetails details(NONE, FIELD, 0);
+ PropertyDetails details(NONE, DATA, 0);
dictionary =
SeededNumberDictionary::AddNumberEntry(dictionary, i, value, details);
}
@@ -4905,7 +5090,7 @@ Object* JSObject::GetHiddenPropertiesHashTable() {
int sorted_index = descriptors->GetSortedKeyIndex(0);
if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
- DCHECK(descriptors->GetType(sorted_index) == FIELD);
+ DCHECK(descriptors->GetType(sorted_index) == DATA);
DCHECK(descriptors->GetDetails(sorted_index).representation().
IsCompatibleForLoad(Representation::Tagged()));
FieldIndex index = FieldIndex::ForDescriptor(this->map(),
@@ -5018,15 +5203,16 @@ MaybeHandle<Object> JSObject::DeleteElementWithInterceptor(
// Rebox CustomArguments::kReturnValueOffset before returning.
return handle(*result_internal, isolate);
}
- MaybeHandle<Object> delete_result = object->GetElementsAccessor()->Delete(
- object, index, NORMAL_DELETION);
+ // TODO(verwaest): Shouldn't this be the mode that was passed in?
+ MaybeHandle<Object> delete_result =
+ object->GetElementsAccessor()->Delete(object, index, SLOPPY);
return delete_result;
}
MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
uint32_t index,
- DeleteMode mode) {
+ LanguageMode language_mode) {
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
@@ -5039,12 +5225,13 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
}
if (object->IsStringObjectWithCharacterAt(index)) {
- if (mode == STRICT_DELETION) {
+ if (is_strict(language_mode)) {
// Deleting a non-configurable property in strict mode.
Handle<Object> name = factory->NewNumberFromUint(index);
- Handle<Object> args[2] = { name, object };
- THROW_NEW_ERROR(isolate, NewTypeError("strict_delete_property",
- HandleVector(args, 2)),
+ Handle<Object> args[] = {name, object};
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("strict_delete_property",
+ HandleVector(args, arraysize(args))),
Object);
}
return factory->false_value();
@@ -5056,7 +5243,7 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return DeleteElement(
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index,
- mode);
+ language_mode);
}
Handle<Object> old_value;
@@ -5077,10 +5264,11 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
// Skip interceptor if forcing deletion.
MaybeHandle<Object> maybe_result;
- if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
+ if (object->HasIndexedInterceptor()) {
maybe_result = DeleteElementWithInterceptor(object, index);
} else {
- maybe_result = object->GetElementsAccessor()->Delete(object, index, mode);
+ maybe_result =
+ object->GetElementsAccessor()->Delete(object, index, language_mode);
}
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, maybe_result, Object);
@@ -5100,23 +5288,44 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
}
+void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name) {
+ DCHECK(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<NameDictionary> dictionary(object->property_dictionary());
+ int entry = dictionary->FindEntry(name);
+ DCHECK_NE(NameDictionary::kNotFound, entry);
+
+ // If we have a global object set the cell to the hole.
+ if (object->IsGlobalObject()) {
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ DCHECK(details.IsConfigurable());
+ Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
+ Handle<Object> value = isolate->factory()->the_hole_value();
+ PropertyCell::SetValueInferType(cell, value);
+ dictionary->DetailsAtPut(entry, details.AsDeleted());
+ return;
+ }
+
+ NameDictionary::DeleteProperty(dictionary, entry);
+ Handle<NameDictionary> new_properties =
+ NameDictionary::Shrink(dictionary, name);
+ object->set_properties(*new_properties);
+}
+
+
MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
Handle<Name> name,
- DeleteMode delete_mode) {
+ LanguageMode language_mode) {
// ECMA-262, 3rd, 8.6.2.5
DCHECK(name->IsName());
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- return DeleteElement(object, index, delete_mode);
+ return DeleteElement(object, index, language_mode);
}
- // Skip interceptors on FORCE_DELETION.
- LookupIterator::Configuration config =
- delete_mode == FORCE_DELETION ? LookupIterator::HIDDEN_SKIP_INTERCEPTOR
- : LookupIterator::HIDDEN;
-
- LookupIterator it(object, name, config);
+ LookupIterator it(object, name, LookupIterator::HIDDEN);
bool is_observed = object->map()->is_observed() &&
!it.isolate()->IsInternallyUsedPropertyName(name);
@@ -5150,10 +5359,10 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
// Fall through.
case LookupIterator::ACCESSOR: {
- if (delete_mode != FORCE_DELETION && !it.IsConfigurable()) {
+ if (!it.IsConfigurable()) {
// Fail if the property is not configurable.
- if (delete_mode == STRICT_DELETION) {
- Handle<Object> args[2] = {name, object};
+ if (is_strict(language_mode)) {
+ Handle<Object> args[] = {name, object};
THROW_NEW_ERROR(it.isolate(),
NewTypeError("strict_delete_property",
HandleVector(args, arraysize(args))),
@@ -5172,9 +5381,9 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
!(object->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
return it.isolate()->factory()->true_value();
}
+
NormalizeProperties(holder, mode, 0, "DeletingProperty");
- Handle<Object> result =
- DeleteNormalizedProperty(holder, name, delete_mode);
+ DeleteNormalizedProperty(holder, name);
ReoptimizeIfPrototype(holder);
if (is_observed) {
@@ -5183,7 +5392,7 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
EnqueueChangeRecord(object, "delete", name, old_value), Object);
}
- return result;
+ return it.isolate()->factory()->true_value();
}
}
}
@@ -5194,23 +5403,25 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
MaybeHandle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
uint32_t index,
- DeleteMode mode) {
+ LanguageMode language_mode) {
if (object->IsJSProxy()) {
- return JSProxy::DeleteElementWithHandler(
- Handle<JSProxy>::cast(object), index, mode);
+ return JSProxy::DeleteElementWithHandler(Handle<JSProxy>::cast(object),
+ index, language_mode);
}
- return JSObject::DeleteElement(Handle<JSObject>::cast(object), index, mode);
+ return JSObject::DeleteElement(Handle<JSObject>::cast(object), index,
+ language_mode);
}
MaybeHandle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
- DeleteMode mode) {
+ LanguageMode language_mode) {
if (object->IsJSProxy()) {
- return JSProxy::DeletePropertyWithHandler(
- Handle<JSProxy>::cast(object), name, mode);
+ return JSProxy::DeletePropertyWithHandler(Handle<JSProxy>::cast(object),
+ name, language_mode);
}
- return JSObject::DeleteProperty(Handle<JSObject>::cast(object), name, mode);
+ return JSObject::DeleteProperty(Handle<JSObject>::cast(object), name,
+ language_mode);
}
@@ -5446,7 +5657,7 @@ static void ApplyAttributesToDictionary(Dictionary* dictionary,
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
// READ_ONLY is an invalid attribute for JS setters/getters.
- if ((attributes & READ_ONLY) && details.type() == CALLBACKS) {
+ if ((attributes & READ_ONLY) && details.type() == ACCESSOR_CONSTANT) {
Object* v = dictionary->ValueAt(i);
if (v->IsPropertyCell()) v = PropertyCell::cast(v)->value();
if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
@@ -5686,7 +5897,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
int limit = copy->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
+ if (details.type() != DATA) continue;
FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
if (object->IsUnboxedDoubleField(index)) {
if (copying) {
@@ -5911,7 +6122,7 @@ int Map::NextFreePropertyIndex() {
DescriptorArray* descs = instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
PropertyDetails details = descs->GetDetails(i);
- if (details.type() == FIELD) {
+ if (details.location() == kField) {
int candidate = details.field_index() + details.field_width_in_words();
if (candidate > free_index) free_index = candidate;
}
@@ -6002,7 +6213,7 @@ static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
if (!(details.IsDontEnum() || key->IsSymbol())) {
storage->set(index, key);
if (!indices.is_null()) {
- if (details.type() != FIELD) {
+ if (details.type() != DATA) {
indices = Handle<FixedArray>();
} else {
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
@@ -6165,12 +6376,11 @@ static bool UpdateGetterSetterInDictionary(
if (entry != SeededNumberDictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS && result->IsAccessorPair()) {
+ if (details.type() == ACCESSOR_CONSTANT && result->IsAccessorPair()) {
DCHECK(details.IsConfigurable());
if (details.attributes() != attributes) {
dictionary->DetailsAtPut(
- entry,
- PropertyDetails(attributes, CALLBACKS, index));
+ entry, PropertyDetails(attributes, ACCESSOR_CONSTANT, index));
}
AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
@@ -6272,14 +6482,14 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
Handle<Object> structure,
PropertyAttributes attributes) {
Heap* heap = object->GetHeap();
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
+ PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0);
// Normalize elements to make this operation simple.
bool had_dictionary_elements = object->HasDictionaryElements();
Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
DCHECK(object->HasDictionaryElements() ||
object->HasDictionaryArgumentsElements());
- // Update the dictionary with the new CALLBACKS property.
+ // Update the dictionary with the new ACCESSOR_CONSTANT property.
dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
details);
dictionary->set_requires_slow_elements();
@@ -6337,8 +6547,8 @@ void JSObject::SetPropertyCallback(Handle<JSObject> object,
Deoptimizer::DeoptimizeGlobalObject(*object);
}
- // Update the dictionary with the new CALLBACKS property.
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
+ // Update the dictionary with the new ACCESSOR_CONSTANT property.
+ PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0);
SetNormalizedProperty(object, name, structure, details);
ReoptimizeIfPrototype(object);
@@ -6552,7 +6762,7 @@ MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
- if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
+ if (dictionary->DetailsAt(entry).type() == ACCESSOR_CONSTANT &&
element->IsAccessorPair()) {
return handle(AccessorPair::cast(element)->GetComponent(component),
isolate);
@@ -6603,7 +6813,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
DescriptorArray* descs = map()->instance_descriptors();
bool value_is_number = value->IsNumber();
for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descs->GetType(i) == FIELD) {
+ if (descs->GetType(i) == DATA) {
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(field_index)) {
if (value_is_number) {
@@ -6623,7 +6833,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
return descs->GetKey(i);
}
}
- } else if (descs->GetType(i) == CONSTANT) {
+ } else if (descs->GetType(i) == DATA_CONSTANT) {
if (descs->GetConstant(i) == value) {
return descs->GetKey(i);
}
@@ -6776,7 +6986,7 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
Handle<LayoutDescriptor> layout_descriptor =
FLAG_unbox_double_fields
- ? LayoutDescriptor::Append(map, descriptor->GetDetails())
+ ? LayoutDescriptor::ShareAppend(map, descriptor->GetDetails())
: handle(LayoutDescriptor::FastPointerLayout(), map->GetIsolate());
{
@@ -6868,7 +7078,7 @@ Handle<Map> Map::CopyReplaceDescriptors(
int length = descriptors->number_of_descriptors();
for (int i = 0; i < length; i++) {
descriptors->SetRepresentation(i, Representation::Tagged());
- if (descriptors->GetDetails(i).type() == FIELD) {
+ if (descriptors->GetDetails(i).type() == DATA) {
descriptors->SetValue(i, HeapType::Any());
}
}
@@ -6907,7 +7117,7 @@ Handle<Map> Map::CopyInstallDescriptors(
int unused_property_fields = map->unused_property_fields();
PropertyDetails details = descriptors->GetDetails(new_descriptor);
- if (details.type() == FIELD) {
+ if (details.location() == kField) {
unused_property_fields = map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
@@ -6920,7 +7130,14 @@ Handle<Map> Map::CopyInstallDescriptors(
LayoutDescriptor::AppendIfFastOrUseFull(map, details,
full_layout_descriptor);
result->set_layout_descriptor(*layout_descriptor);
+#ifdef VERIFY_HEAP
+ // TODO(ishell): remove these checks from VERIFY_HEAP mode.
+ if (FLAG_verify_heap) {
+ CHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
+ }
+#else
SLOW_DCHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
+#endif
result->set_visitor_id(StaticVisitorBase::GetVisitorId(*result));
}
@@ -6950,31 +7167,18 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
map->CanHaveMoreTransitions() &&
!map->HasElementsTransition();
- if (insert_transition && map->owns_descriptors()) {
- // In case the map owned its own descriptors, share the descriptors and
- // transfer ownership to the new map.
- Handle<Map> new_map = CopyDropDescriptors(map);
+ if (insert_transition) {
+ Handle<Map> new_map = CopyForTransition(map, "CopyAsElementsKind");
+ new_map->set_elements_kind(kind);
ConnectElementsTransition(map, new_map);
- new_map->set_elements_kind(kind);
- // The properties did not change, so reuse descriptors.
- new_map->InitializeDescriptors(map->instance_descriptors(),
- map->GetLayoutDescriptor());
return new_map;
}
- // In case the map did not own its own descriptors, a split is forced by
- // copying the map; creating a new descriptor array cell.
// Create a new free-floating map only if we are not allowed to store it.
Handle<Map> new_map = Copy(map, "CopyAsElementsKind");
-
new_map->set_elements_kind(kind);
-
- if (insert_transition) {
- ConnectElementsTransition(map, new_map);
- }
-
return new_map;
}
@@ -6984,27 +7188,55 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) {
Isolate* isolate = map->GetIsolate();
- // In case the map owned its own descriptors, share the descriptors and
- // transfer ownership to the new map.
- Handle<Map> new_map;
- if (map->owns_descriptors()) {
- new_map = CopyDropDescriptors(map);
- } else {
- DCHECK(!map->is_prototype_map());
- new_map = Copy(map, "CopyForObserved");
+ bool insert_transition =
+ map->CanHaveMoreTransitions() && !map->is_prototype_map();
+
+ if (insert_transition) {
+ Handle<Map> new_map = CopyForTransition(map, "CopyForObserved");
+ new_map->set_is_observed();
+
+ Handle<Name> name = isolate->factory()->observed_symbol();
+ ConnectTransition(map, new_map, name, SPECIAL_TRANSITION);
+ return new_map;
}
+ // Create a new free-floating map only if we are not allowed to store it.
+ Handle<Map> new_map = Map::Copy(map, "CopyForObserved");
new_map->set_is_observed();
+ return new_map;
+}
+
+
+Handle<Map> Map::CopyForTransition(Handle<Map> map, const char* reason) {
+ DCHECK(!map->is_prototype_map());
+ Handle<Map> new_map = CopyDropDescriptors(map);
+
if (map->owns_descriptors()) {
+ // In case the map owned its own descriptors, share the descriptors and
+ // transfer ownership to the new map.
// The properties did not change, so reuse descriptors.
new_map->InitializeDescriptors(map->instance_descriptors(),
map->GetLayoutDescriptor());
+ } else {
+ // In case the map did not own its own descriptors, a split is forced by
+ // copying the map; creating a new descriptor array cell.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpTo(descriptors, number_of_own_descriptors);
+ Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
+ map->GetIsolate());
+ new_map->InitializeDescriptors(*new_descriptors, *new_layout_descriptor);
}
- if (map->CanHaveMoreTransitions()) {
- Handle<Name> name = isolate->factory()->observed_symbol();
- ConnectTransition(map, new_map, name, SPECIAL_TRANSITION);
+#if TRACE_MAPS
+ if (FLAG_trace_maps) {
+ PrintF("[TraceMaps: CopyForTransition from= %p to= %p reason= %s ]\n",
+ reinterpret_cast<void*>(*map), reinterpret_cast<void*>(*new_map),
+ reason);
}
+#endif
+
return new_map;
}
@@ -7071,17 +7303,17 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
PropertyDetails details = GetDetails(descriptor);
switch (details.type()) {
- case FIELD:
+ case DATA:
return value->FitsRepresentation(details.representation()) &&
GetFieldType(descriptor)->NowContains(value);
- case CONSTANT:
+ case DATA_CONSTANT:
DCHECK(GetConstant(descriptor) != value ||
value->FitsRepresentation(details.representation()));
return GetConstant(descriptor) == value;
- case ACCESSOR_FIELD:
- case CALLBACKS:
+ case ACCESSOR:
+ case ACCESSOR_CONSTANT:
return false;
}
@@ -7103,11 +7335,13 @@ Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
if (descriptors->CanHoldValue(descriptor, *value)) return map;
Isolate* isolate = map->GetIsolate();
+ PropertyAttributes attributes =
+ descriptors->GetDetails(descriptor).attributes();
Representation representation = value->OptimalRepresentation();
Handle<HeapType> type = value->OptimalType(isolate, representation);
- return GeneralizeRepresentation(map, descriptor, representation, type,
- FORCE_IN_OBJECT);
+ return ReconfigureProperty(map, descriptor, kData, attributes, representation,
+ type, FORCE_FIELD);
}
@@ -7121,7 +7355,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
// Migrate to the newest map before storing the property.
map = Update(map);
- int index = map->SearchTransition(DATA, *name, attributes);
+ int index = map->SearchTransition(kData, *name, attributes);
if (index != TransitionArray::kNotFound) {
Handle<Map> transition(map->GetTransition(index));
int descriptor = transition->LastAdded();
@@ -7164,16 +7398,29 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
}
-Handle<Map> Map::ReconfigureDataProperty(Handle<Map> map, int descriptor,
- PropertyAttributes attributes) {
+Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
+ PropertyKind kind,
+ PropertyAttributes attributes) {
// Dictionaries have to be reconfigured in-place.
DCHECK(!map->is_dictionary_map());
- // For now, give up on transitioning and just create a unique map.
- // TODO(verwaest/ishell): Cache transitions with different attributes.
- return CopyGeneralizeAllRepresentations(map, descriptor, FORCE_IN_OBJECT,
- attributes,
- "GenAll_AttributesMismatch");
+ if (!map->GetBackPointer()->IsMap()) {
+ // There is no benefit from reconstructing transition tree for maps without
+ // back pointers.
+ return CopyGeneralizeAllRepresentations(
+ map, descriptor, FORCE_FIELD, kind, attributes,
+ "GenAll_AttributesMismatchProtoMap");
+ }
+
+ if (FLAG_trace_generalization) {
+ map->PrintReconfiguration(stdout, descriptor, kind, attributes);
+ }
+
+ Isolate* isolate = map->GetIsolate();
+ Handle<Map> new_map = ReconfigureProperty(
+ map, descriptor, kind, attributes, Representation::None(),
+ HeapType::None(isolate), FORCE_FIELD);
+ return new_map;
}
@@ -7199,14 +7446,14 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
- int index = map->SearchTransition(ACCESSOR, *name, attributes);
+ int index = map->SearchTransition(kAccessor, *name, attributes);
if (index != TransitionArray::kNotFound) {
Handle<Map> transition(map->GetTransition(index));
DescriptorArray* descriptors = transition->instance_descriptors();
int descriptor = transition->LastAdded();
DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
- DCHECK_EQ(ACCESSOR, descriptors->GetDetails(descriptor).kind());
+ DCHECK_EQ(kAccessor, descriptors->GetDetails(descriptor).kind());
DCHECK_EQ(attributes, descriptors->GetDetails(descriptor).attributes());
Handle<Object> maybe_pair(descriptors->GetValue(descriptor), isolate);
@@ -7230,7 +7477,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
}
PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
- if (old_details.type() != CALLBACKS) {
+ if (old_details.type() != ACCESSOR_CONSTANT) {
return Map::Normalize(map, mode, "AccessorsOverwritingNonAccessors");
}
@@ -7260,7 +7507,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
pair->set(component, *accessor);
TransitionFlag flag = INSERT_TRANSITION;
- CallbacksDescriptor new_desc(name, pair, attributes);
+ AccessorConstantDescriptor new_desc(name, pair, attributes);
return Map::CopyInsertDescriptor(map, &new_desc, flag);
}
@@ -7279,13 +7526,14 @@ Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
return ShareDescriptor(map, descriptors, descriptor);
}
- Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
- descriptors, map->NumberOfOwnDescriptors(), 1);
+ int nof = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpTo(descriptors, nof, 1);
new_descriptors->Append(descriptor);
Handle<LayoutDescriptor> new_layout_descriptor =
FLAG_unbox_double_fields
- ? LayoutDescriptor::Append(map, descriptor->GetDetails())
+ ? LayoutDescriptor::New(map, new_descriptors, nof + 1)
: handle(LayoutDescriptor::FastPointerLayout(), map->GetIsolate());
return CopyReplaceDescriptors(map, new_descriptors, new_layout_descriptor,
@@ -7344,7 +7592,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
if (!key->IsSymbol() || !Symbol::cast(key)->is_private()) {
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
- if (details.type() != CALLBACKS || !value->IsAccessorPair()) {
+ if (details.type() != ACCESSOR_CONSTANT || !value->IsAccessorPair()) {
mask |= READ_ONLY;
}
details = details.CopyAddAttributes(
@@ -7534,13 +7782,8 @@ class IntrusivePrototypeTransitionIterator {
Map* GetTransition(int transitionNumber) {
FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
- return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
- }
-
- int IndexFor(int transitionNumber) {
- return Map::kProtoTransitionHeaderSize +
- Map::kProtoTransitionMapOffset +
- transitionNumber * Map::kProtoTransitionElementsPerEntry;
+ int index = Map::kProtoTransitionHeaderSize + transitionNumber;
+ return Map::cast(proto_trans->get(index));
}
Map* map_;
@@ -8391,7 +8634,6 @@ Object* AccessorPair::GetComponent(AccessorComponent component) {
Handle<DeoptimizationInputData> DeoptimizationInputData::New(
Isolate* isolate, int deopt_entry_count, PretenureFlag pretenure) {
- DCHECK(deopt_entry_count > 0);
return Handle<DeoptimizationInputData>::cast(
isolate->factory()->NewFixedArray(LengthFor(deopt_entry_count),
pretenure));
@@ -8928,20 +9170,18 @@ static void CalculateLineEndsImpl(Isolate* isolate,
Vector<const SourceChar> src,
bool include_ending_line) {
const int src_len = src.length();
- StringSearch<uint8_t, SourceChar> search(isolate, STATIC_CHAR_VECTOR("\n"));
-
- // Find and record line ends.
- int position = 0;
- while (position != -1 && position < src_len) {
- position = search.Search(src, position);
- if (position != -1) {
- line_ends->Add(position);
- position++;
- } else if (include_ending_line) {
- // Even if the last line misses a line end, it is counted.
- line_ends->Add(src_len);
- return;
- }
+ UnicodeCache* cache = isolate->unicode_cache();
+ for (int i = 0; i < src_len - 1; i++) {
+ SourceChar current = src[i];
+ SourceChar next = src[i + 1];
+ if (cache->IsLineTerminatorSequence(current, next)) line_ends->Add(i);
+ }
+
+ if (src_len > 0 && cache->IsLineTerminatorSequence(src[src_len - 1], 0)) {
+ line_ends->Add(src_len - 1);
+ } else if (include_ending_line) {
+ // Even if the last line misses a line end, it is counted.
+ line_ends->Add(src_len);
}
}
@@ -9222,23 +9462,6 @@ bool String::SlowEquals(Handle<String> one, Handle<String> two) {
}
-bool String::MarkAsUndetectable() {
- if (StringShape(this).IsInternalized()) return false;
-
- Map* map = this->map();
- Heap* heap = GetHeap();
- if (map == heap->string_map()) {
- this->set_map(heap->undetectable_string_map());
- return true;
- } else if (map == heap->one_byte_string_map()) {
- this->set_map(heap->undetectable_one_byte_string_map());
- return true;
- }
- // Rest cannot be marked as undetectable
- return false;
-}
-
-
bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
int slen = length();
// Can't check exact length equality, but we can check bounds.
@@ -9249,10 +9472,10 @@ bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
return false;
}
int i;
- unsigned remaining_in_str = static_cast<unsigned>(str_len);
+ size_t remaining_in_str = static_cast<size_t>(str_len);
const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start());
for (i = 0; i < slen && remaining_in_str > 0; i++) {
- unsigned cursor = 0;
+ size_t cursor = 0;
uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
DCHECK(cursor > 0 && cursor <= remaining_in_str);
if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
@@ -9426,13 +9649,13 @@ uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
// Start with a fake length which won't affect computation.
// It will be updated later.
StringHasher hasher(String::kMaxArrayIndexSize, seed);
- unsigned remaining = static_cast<unsigned>(vector_length);
+ size_t remaining = static_cast<size_t>(vector_length);
const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
int utf16_length = 0;
bool is_index = true;
DCHECK(hasher.is_array_index_);
while (remaining > 0) {
- unsigned consumed = 0;
+ size_t consumed = 0;
uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
DCHECK(consumed > 0 && consumed <= remaining);
stream += consumed;
@@ -9522,13 +9745,13 @@ int Map::Hash() {
static bool CheckEquivalent(Map* first, Map* second) {
- return
- first->constructor() == second->constructor() &&
- first->prototype() == second->prototype() &&
- first->instance_type() == second->instance_type() &&
- first->bit_field() == second->bit_field() &&
- first->bit_field2() == second->bit_field2() &&
- first->has_instance_call_handler() == second->has_instance_call_handler();
+ return first->constructor() == second->constructor() &&
+ first->prototype() == second->prototype() &&
+ first->instance_type() == second->instance_type() &&
+ first->bit_field() == second->bit_field() &&
+ first->is_extensible() == second->is_extensible() &&
+ first->has_instance_call_handler() ==
+ second->has_instance_call_handler();
}
@@ -9541,7 +9764,8 @@ bool Map::EquivalentToForNormalization(Map* other,
PropertyNormalizationMode mode) {
int properties = mode == CLEAR_INOBJECT_PROPERTIES
? 0 : other->inobject_properties();
- return CheckEquivalent(this, other) && inobject_properties() == properties;
+ return CheckEquivalent(this, other) && bit_field2() == other->bit_field2() &&
+ inobject_properties() == properties;
}
@@ -9601,9 +9825,7 @@ void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
DCHECK(isolate->use_crankshaft());
DCHECK(!IsOptimized());
- DCHECK(shared()->allows_lazy_compilation() ||
- code()->optimizable());
- DCHECK(!shared()->is_generator());
+ DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
@@ -9627,10 +9849,9 @@ void JSFunction::AttemptConcurrentOptimization() {
}
DCHECK(isolate->use_crankshaft());
DCHECK(!IsInOptimizationQueue());
- DCHECK(is_compiled() || isolate->DebuggerHasBreakPoints());
+ DCHECK(is_compiled() || isolate->debug()->has_break_points());
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
- DCHECK(!shared()->is_generator());
DCHECK(isolate->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
@@ -9724,7 +9945,7 @@ FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) {
FixedArray* code_map = FixedArray::cast(optimized_code_map());
if (!bound()) {
FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
- DCHECK_NE(NULL, cached_literals);
+ DCHECK_NOT_NULL(cached_literals);
return cached_literals;
}
return NULL;
@@ -9735,7 +9956,7 @@ Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
DCHECK(index > kEntriesStart);
FixedArray* code_map = FixedArray::cast(optimized_code_map());
Code* code = Code::cast(code_map->get(index));
- DCHECK_NE(NULL, code);
+ DCHECK_NOT_NULL(code);
return code;
}
@@ -9817,7 +10038,7 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
if (object->IsGlobalObject()) return;
if (object->IsJSGlobalProxy()) return;
if (mode == FAST_PROTOTYPE && !object->map()->is_prototype_map()) {
- // First normalize to ensure all JSFunctions are CONSTANT.
+ // First normalize to ensure all JSFunctions are DATA_CONSTANT.
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
}
@@ -9832,6 +10053,19 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
JSObject::MigrateToMap(object, new_map);
}
+ if (object->map()->constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(object->map()->constructor());
+ // Replace the pointer to the exact constructor with the Object function
+ // from the same context if undetectable from JS. This is to avoid keeping
+ // memory alive unnecessarily.
+ if (!constructor->shared()->IsApiFunction() &&
+ object->class_name() ==
+ object->GetIsolate()->heap()->Object_string()) {
+ Context* context = constructor->context()->native_context();
+ JSFunction* object_function = context->object_function();
+ object->map()->set_constructor(object_function);
+ }
+ }
object->map()->set_is_prototype_map(true);
}
}
@@ -10027,16 +10261,17 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
bool JSFunction::RemovePrototype() {
Context* native_context = context()->native_context();
- Map* no_prototype_map = shared()->strict_mode() == SLOPPY
- ? native_context->sloppy_function_without_prototype_map()
- : native_context->strict_function_without_prototype_map();
+ Map* no_prototype_map =
+ is_strict(shared()->language_mode())
+ ? native_context->strict_function_without_prototype_map()
+ : native_context->sloppy_function_without_prototype_map();
if (map() == no_prototype_map) return true;
#ifdef DEBUG
- if (map() != (shared()->strict_mode() == SLOPPY
- ? native_context->sloppy_function_map()
- : native_context->strict_function_map())) {
+ if (map() != (is_strict(shared()->language_mode())
+ ? native_context->strict_function_map()
+ : native_context->sloppy_function_map())) {
return false;
}
#endif
@@ -10451,6 +10686,40 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
}
+void SharedFunctionInfo::InitFromFunctionLiteral(
+ Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
+ shared_info->set_length(lit->scope()->default_function_length());
+ if (IsSubclassConstructor(lit->kind())) {
+ shared_info->set_internal_formal_parameter_count(lit->parameter_count() +
+ 1);
+ } else {
+ shared_info->set_internal_formal_parameter_count(lit->parameter_count());
+ }
+ shared_info->set_function_token_position(lit->function_token_position());
+ shared_info->set_start_position(lit->start_position());
+ shared_info->set_end_position(lit->end_position());
+ shared_info->set_is_expression(lit->is_expression());
+ shared_info->set_is_anonymous(lit->is_anonymous());
+ shared_info->set_inferred_name(*lit->inferred_name());
+ shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ shared_info->set_allows_lazy_compilation_without_context(
+ lit->AllowsLazyCompilationWithoutContext());
+ shared_info->set_language_mode(lit->language_mode());
+ shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
+ shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
+ shared_info->set_ast_node_count(lit->ast_node_count());
+ shared_info->set_is_function(lit->is_function());
+ if (lit->dont_optimize_reason() != kNoReason) {
+ shared_info->DisableOptimization(lit->dont_optimize_reason());
+ }
+ shared_info->set_dont_cache(
+ lit->flags()->Contains(AstPropertiesFlag::kDontCache));
+ shared_info->set_kind(lit->kind());
+ shared_info->set_uses_super_property(lit->uses_super_property());
+ shared_info->set_asm_function(lit->scope()->asm_function());
+}
+
+
bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
DCHECK(!id.IsNone());
Code* unoptimized = code();
@@ -10966,6 +11235,13 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
void SharedFunctionInfo::ClearTypeFeedbackInfo() {
feedback_vector()->ClearSlots(this);
+ feedback_vector()->ClearICSlots(this);
+}
+
+
+void SharedFunctionInfo::ClearTypeFeedbackInfoAtGCTime() {
+ feedback_vector()->ClearSlots(this);
+ feedback_vector()->ClearICSlotsAtGCTime(this);
}
@@ -11136,25 +11412,10 @@ Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
void Code::PrintDeoptLocation(FILE* out, int bailout_id) {
- const char* last_comment = NULL;
- int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
- | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::COMMENT) {
- last_comment = reinterpret_cast<const char*>(info->data());
- } else if (last_comment != NULL) {
- if ((bailout_id == Deoptimizer::GetDeoptimizationId(
- GetIsolate(), info->target_address(), Deoptimizer::EAGER)) ||
- (bailout_id == Deoptimizer::GetDeoptimizationId(
- GetIsolate(), info->target_address(), Deoptimizer::SOFT)) ||
- (bailout_id == Deoptimizer::GetDeoptimizationId(
- GetIsolate(), info->target_address(), Deoptimizer::LAZY))) {
- CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
- PrintF(out, " %s\n", last_comment);
- return;
- }
- }
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, bailout_id);
+ if (info.deopt_reason != Deoptimizer::kNoReason || info.raw_position != 0) {
+ PrintF(out, " ;;; deoptimize at %d: %s\n", info.raw_position,
+ Deoptimizer::GetDeoptReason(info.deopt_reason));
}
}
@@ -11185,6 +11446,29 @@ const char* Code::Kind2String(Kind kind) {
}
+Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
+ DCHECK(code->kind() == OPTIMIZED_FUNCTION);
+ WeakCell* raw_cell = code->CachedWeakCell();
+ if (raw_cell != NULL) return Handle<WeakCell>(raw_cell);
+ Handle<WeakCell> cell = code->GetIsolate()->factory()->NewWeakCell(code);
+ DeoptimizationInputData::cast(code->deoptimization_data())
+ ->SetWeakCellCache(*cell);
+ return cell;
+}
+
+
+WeakCell* Code::CachedWeakCell() {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ Object* weak_cell_cache =
+ DeoptimizationInputData::cast(deoptimization_data())->WeakCellCache();
+ if (weak_cell_cache->IsWeakCell()) {
+ DCHECK(this == WeakCell::cast(weak_cell_cache)->value());
+ return WeakCell::cast(weak_cell_cache);
+ }
+ return NULL;
+}
+
+
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(
@@ -11400,7 +11684,8 @@ const char* Code::StubType2String(StubType type) {
void Code::PrintExtraICState(std::ostream& os, // NOLINT
Kind kind, ExtraICState extra) {
os << "extra_ic_state = ";
- if ((kind == STORE_IC || kind == KEYED_STORE_IC) && (extra == STRICT)) {
+ if ((kind == STORE_IC || kind == KEYED_STORE_IC) &&
+ is_strict(static_cast<LanguageMode>(extra))) {
os << "STRICT\n";
} else {
os << extra << "\n";
@@ -11813,9 +12098,12 @@ MaybeHandle<Object> JSArray::SetElementsLength(
SLOPPY).Assert();
}
- SetProperty(deleted, isolate->factory()->length_string(),
- isolate->factory()->NewNumberFromUint(delete_count),
- STRICT).Assert();
+ RETURN_ON_EXCEPTION(
+ isolate,
+ SetProperty(deleted, isolate->factory()->length_string(),
+ isolate->factory()->NewNumberFromUint(delete_count),
+ STRICT),
+ Object);
}
RETURN_ON_EXCEPTION(
@@ -11827,17 +12115,12 @@ MaybeHandle<Object> JSArray::SetElementsLength(
Handle<Map> Map::GetPrototypeTransition(Handle<Map> map,
Handle<Object> prototype) {
+ DisallowHeapAllocation no_gc;
FixedArray* cache = map->GetPrototypeTransitions();
int number_of_transitions = map->NumberOfProtoTransitions();
- const int proto_offset =
- kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
- const int map_offset = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
- const int step = kProtoTransitionElementsPerEntry;
for (int i = 0; i < number_of_transitions; i++) {
- if (cache->get(proto_offset + i * step) == *prototype) {
- Object* result = cache->get(map_offset + i * step);
- return Handle<Map>(Map::cast(result));
- }
+ Map* map = Map::cast(cache->get(kProtoTransitionHeaderSize + i));
+ if (map->prototype() == *prototype) return handle(map);
}
return Handle<Map>();
}
@@ -11853,28 +12136,27 @@ Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
if (map->is_prototype_map()) return map;
if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return map;
- const int step = kProtoTransitionElementsPerEntry;
const int header = kProtoTransitionHeaderSize;
Handle<FixedArray> cache(map->GetPrototypeTransitions());
- int capacity = (cache->length() - header) / step;
+ int capacity = cache->length() - header;
int transitions = map->NumberOfProtoTransitions() + 1;
if (transitions > capacity) {
- if (capacity > kMaxCachedPrototypeTransitions) return map;
+ // Grow array by factor 2 up to MaxCachedPrototypeTransitions.
+ int new_capacity = Min(kMaxCachedPrototypeTransitions, transitions * 2);
+ if (new_capacity == capacity) return map;
- // Grow array by factor 2 over and above what we need.
- cache = FixedArray::CopySize(cache, transitions * 2 * step + header);
+ cache = FixedArray::CopySize(cache, header + new_capacity);
SetPrototypeTransitions(map, cache);
}
// Reload number of transitions as GC might shrink them.
int last = map->NumberOfProtoTransitions();
- int entry = header + last * step;
+ int entry = header + last;
- cache->set(entry + kProtoTransitionPrototypeOffset, *prototype);
- cache->set(entry + kProtoTransitionMapOffset, *target_map);
+ cache->set(entry, *target_map);
map->SetNumberOfProtoTransitions(last + 1);
return map;
@@ -11906,9 +12188,9 @@ void Map::ZapPrototypeTransitions() {
void Map::AddDependentCompilationInfo(Handle<Map> map,
DependentCode::DependencyGroup group,
CompilationInfo* info) {
- Handle<DependentCode> codes =
- DependentCode::Insert(handle(map->dependent_code(), info->isolate()),
- group, info->object_wrapper());
+ Handle<DependentCode> codes = DependentCode::InsertCompilationInfo(
+ handle(map->dependent_code(), info->isolate()), group,
+ info->object_wrapper());
if (*codes != map->dependent_code()) map->set_dependent_code(*codes);
info->dependencies(group)->Add(map, info->zone());
}
@@ -11918,8 +12200,9 @@ void Map::AddDependentCompilationInfo(Handle<Map> map,
void Map::AddDependentCode(Handle<Map> map,
DependentCode::DependencyGroup group,
Handle<Code> code) {
- Handle<DependentCode> codes = DependentCode::Insert(
- Handle<DependentCode>(map->dependent_code()), group, code);
+ Handle<WeakCell> cell = Code::WeakCellFor(code);
+ Handle<DependentCode> codes = DependentCode::InsertWeakCode(
+ Handle<DependentCode>(map->dependent_code()), group, cell);
if (*codes != map->dependent_code()) map->set_dependent_code(*codes);
}
@@ -11951,6 +12234,20 @@ DependentCode* DependentCode::ForObject(Handle<HeapObject> object,
}
+Handle<DependentCode> DependentCode::InsertCompilationInfo(
+ Handle<DependentCode> entries, DependencyGroup group,
+ Handle<Foreign> info) {
+ return Insert(entries, group, info);
+}
+
+
+Handle<DependentCode> DependentCode::InsertWeakCode(
+ Handle<DependentCode> entries, DependencyGroup group,
+ Handle<WeakCell> code_cell) {
+ return Insert(entries, group, code_cell);
+}
+
+
Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
DependencyGroup group,
Handle<Object> object) {
@@ -11963,27 +12260,13 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
if (entries->object_at(i) == *object) return entries;
}
if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
- int capacity = kCodesStartIndex + number_of_entries + 1;
- if (capacity > 5) capacity = capacity * 5 / 4;
- Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
- FixedArray::CopySize(entries, capacity, TENURED));
- // The number of codes can change after GC.
+ entries = EnsureSpace(entries);
+ // The number of codes can change after Compact and GC.
starts.Recompute(*entries);
start = starts.at(group);
end = starts.at(group + 1);
- number_of_entries = starts.number_of_entries();
- for (int i = 0; i < number_of_entries; i++) {
- entries->clear_at(i);
- }
- // If the old fixed array was empty, we need to reset counters of the
- // new array.
- if (number_of_entries == 0) {
- for (int g = 0; g < kGroupCount; g++) {
- new_entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
- }
- }
- entries = new_entries;
}
+
entries->ExtendGroup(group);
entries->set_object_at(end, *object);
entries->set_number_of_entries(group, end + 1 - start);
@@ -11991,42 +12274,82 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
}
-void DependentCode::UpdateToFinishedCode(DependencyGroup group,
- CompilationInfo* info,
- Code* code) {
+Handle<DependentCode> DependentCode::EnsureSpace(
+ Handle<DependentCode> entries) {
+ if (entries->length() == 0) {
+ entries = Handle<DependentCode>::cast(
+ FixedArray::CopySize(entries, kCodesStartIndex + 1, TENURED));
+ for (int g = 0; g < kGroupCount; g++) {
+ entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
+ }
+ return entries;
+ }
+ if (entries->Compact()) return entries;
+ GroupStartIndexes starts(*entries);
+ int capacity =
+ kCodesStartIndex + DependentCode::Grow(starts.number_of_entries());
+ return Handle<DependentCode>::cast(
+ FixedArray::CopySize(entries, capacity, TENURED));
+}
+
+
+bool DependentCode::Compact() {
+ GroupStartIndexes starts(this);
+ int n = 0;
+ for (int g = 0; g < kGroupCount; g++) {
+ int start = starts.at(g);
+ int end = starts.at(g + 1);
+ int count = 0;
+ DCHECK(start >= n);
+ for (int i = start; i < end; i++) {
+ Object* obj = object_at(i);
+ if (!obj->IsWeakCell() || !WeakCell::cast(obj)->cleared()) {
+ if (i != n + count) {
+ copy(i, n + count);
+ }
+ count++;
+ }
+ }
+ if (count != end - start) {
+ set_number_of_entries(static_cast<DependencyGroup>(g), count);
+ }
+ n += count;
+ }
+ return n < starts.number_of_entries();
+}
+
+
+void DependentCode::UpdateToFinishedCode(DependencyGroup group, Foreign* info,
+ WeakCell* code_cell) {
DisallowHeapAllocation no_gc;
- AllowDeferredHandleDereference get_object_wrapper;
- Foreign* info_wrapper = *info->object_wrapper();
GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
for (int i = start; i < end; i++) {
- if (object_at(i) == info_wrapper) {
- set_object_at(i, code);
+ if (object_at(i) == info) {
+ set_object_at(i, code_cell);
break;
}
}
#ifdef DEBUG
for (int i = start; i < end; i++) {
- DCHECK(is_code_at(i) || compilation_info_at(i) != info);
+ DCHECK(object_at(i) != info);
}
#endif
}
void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info) {
+ Foreign* info) {
DisallowHeapAllocation no_allocation;
- AllowDeferredHandleDereference get_object_wrapper;
- Foreign* info_wrapper = *info->object_wrapper();
GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
// Find compilation info wrapper.
int info_pos = -1;
for (int i = start; i < end; i++) {
- if (object_at(i) == info_wrapper) {
+ if (object_at(i) == info) {
info_pos = i;
break;
}
@@ -12047,18 +12370,18 @@ void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
#ifdef DEBUG
for (int i = start; i < end - 1; i++) {
- DCHECK(is_code_at(i) || compilation_info_at(i) != info);
+ DCHECK(object_at(i) != info);
}
#endif
}
-bool DependentCode::Contains(DependencyGroup group, Code* code) {
+bool DependentCode::Contains(DependencyGroup group, WeakCell* code_cell) {
GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
for (int i = start; i < end; i++) {
- if (object_at(i) == code) return true;
+ if (object_at(i) == code_cell) return true;
}
return false;
}
@@ -12076,15 +12399,24 @@ bool DependentCode::MarkCodeForDeoptimization(
// Mark all the code that needs to be deoptimized.
bool marked = false;
+ bool invalidate_embedded_objects = group == kWeakCodeGroup;
for (int i = start; i < end; i++) {
- if (is_code_at(i)) {
- Code* code = code_at(i);
+ Object* obj = object_at(i);
+ if (obj->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(obj);
+ if (cell->cleared()) continue;
+ Code* code = Code::cast(cell->value());
if (!code->marked_for_deoptimization()) {
SetMarkedForDeoptimization(code, group);
+ if (invalidate_embedded_objects) {
+ code->InvalidateEmbeddedObjects();
+ }
marked = true;
}
} else {
- CompilationInfo* info = compilation_info_at(i);
+ DCHECK(obj->IsForeign());
+ CompilationInfo* info = reinterpret_cast<CompilationInfo*>(
+ Foreign::cast(obj)->foreign_address());
info->AbortDueToDependencyChange();
}
}
@@ -12108,7 +12440,6 @@ void DependentCode::DeoptimizeDependentCodeGroup(
DCHECK(AllowCodeDependencyChange::IsAllowed());
DisallowHeapAllocation no_allocation_scope;
bool marked = MarkCodeForDeoptimization(isolate, group);
-
if (marked) Deoptimizer::DeoptimizeMarkedCode(isolate);
}
@@ -12223,6 +12554,13 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
real_receiver =
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
iter.Advance();
+ if (!real_receiver->map()->is_extensible()) {
+ Handle<Object> args[] = {object};
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("non_extensible_proto",
+ HandleVector(args, arraysize(args))),
+ Object);
+ }
}
}
@@ -12279,18 +12617,14 @@ MaybeHandle<AccessorPair> JSObject::GetOwnElementAccessorPair(
// Check for lookup interceptor.
if (object->HasIndexedInterceptor()) return MaybeHandle<AccessorPair>();
- return object->GetElementsAccessor()->GetAccessorPair(object, object, index);
+ return object->GetElementsAccessor()->GetAccessorPair(object, index);
}
MaybeHandle<Object> JSObject::SetElementWithInterceptor(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode,
+ bool check_prototype, SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing
@@ -12312,9 +12646,7 @@ MaybeHandle<Object> JSObject::SetElementWithInterceptor(
}
return SetElementWithoutInterceptor(object, index, value, attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ language_mode, check_prototype, set_mode);
}
@@ -12362,11 +12694,6 @@ MaybeHandle<Object> JSObject::GetElementWithCallback(
return isolate->factory()->undefined_value();
}
- if (structure->IsDeclaredAccessorInfo()) {
- return GetDeclaredAccessorProperty(
- receiver, Handle<DeclaredAccessorInfo>::cast(structure), isolate);
- }
-
UNREACHABLE();
return MaybeHandle<Object>();
}
@@ -12374,7 +12701,7 @@ MaybeHandle<Object> JSObject::GetElementWithCallback(
MaybeHandle<Object> JSObject::SetElementWithCallback(
Handle<Object> object, Handle<Object> structure, uint32_t index,
- Handle<Object> value, Handle<JSObject> holder, StrictMode strict_mode) {
+ Handle<Object> value, Handle<JSObject> holder, LanguageMode language_mode) {
Isolate* isolate = holder->GetIsolate();
// We should never get here to initialize a const with the hole
@@ -12408,18 +12735,16 @@ MaybeHandle<Object> JSObject::SetElementWithCallback(
return SetPropertyWithDefinedSetter(
object, Handle<JSReceiver>::cast(setter), value);
} else {
- if (strict_mode == SLOPPY) return value;
+ if (is_sloppy(language_mode)) return value;
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { key, holder };
- THROW_NEW_ERROR(
- isolate, NewTypeError("no_setter_in_callback", HandleVector(args, 2)),
- Object);
+ Handle<Object> args[] = {key, holder};
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("no_setter_in_callback",
+ HandleVector(args, arraysize(args))),
+ Object);
}
}
- // TODO(dcarney): Handle correctly.
- if (structure->IsDeclaredAccessorInfo()) return value;
-
UNREACHABLE();
return MaybeHandle<Object>();
}
@@ -12455,7 +12780,7 @@ bool JSObject::HasDictionaryArgumentsElements() {
MaybeHandle<Object> JSObject::SetFastElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictMode strict_mode,
+ LanguageMode language_mode,
bool check_prototype) {
DCHECK(object->HasFastSmiOrObjectElements() ||
object->HasFastArgumentsElements());
@@ -12484,7 +12809,7 @@ MaybeHandle<Object> JSObject::SetFastElement(Handle<JSObject> object,
(index >= capacity || backing_store->get(index)->IsTheHole())) {
bool found;
MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
- object, index, value, &found, strict_mode);
+ object, index, value, &found, language_mode);
if (found) return result;
}
@@ -12527,7 +12852,7 @@ MaybeHandle<Object> JSObject::SetFastElement(Handle<JSObject> object,
}
if (convert_to_slow) {
NormalizeElements(object);
- return SetDictionaryElement(object, index, value, NONE, strict_mode,
+ return SetDictionaryElement(object, index, value, NONE, language_mode,
check_prototype);
}
}
@@ -12581,13 +12906,9 @@ MaybeHandle<Object> JSObject::SetFastElement(Handle<JSObject> object,
MaybeHandle<Object> JSObject::SetDictionaryElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode,
+ bool check_prototype, SetPropertyMode set_mode) {
DCHECK(object->HasDictionaryElements() ||
object->HasDictionaryArgumentsElements());
Isolate* isolate = object->GetIsolate();
@@ -12604,29 +12925,33 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
if (entry != SeededNumberDictionary::kNotFound) {
Handle<Object> element(dictionary->ValueAt(entry), isolate);
PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
+ if (details.type() == ACCESSOR_CONSTANT && set_mode == SET_PROPERTY) {
return SetElementWithCallback(object, element, index, value, object,
- strict_mode);
+ language_mode);
+ } else if (set_mode == DEFINE_PROPERTY && !details.IsConfigurable() &&
+ details.kind() == kAccessor) {
+ return RedefineNonconfigurableProperty(
+ isolate, isolate->factory()->NewNumberFromUint(index),
+ isolate->factory()->undefined_value(), language_mode);
+
+ } else if ((set_mode == DEFINE_PROPERTY && !details.IsConfigurable() &&
+ details.IsReadOnly()) ||
+ (set_mode == SET_PROPERTY && details.IsReadOnly() &&
+ !element->IsTheHole())) {
+ // If a value has not been initialized we allow writing to it even if it
+ // is read-only (a declared const that has not been initialized).
+ return WriteToReadOnlyProperty(
+ isolate, object, isolate->factory()->NewNumberFromUint(index),
+ isolate->factory()->undefined_value(), language_mode);
} else {
+ DCHECK(details.IsConfigurable() || !details.IsReadOnly() ||
+ element->IsTheHole());
dictionary->UpdateMaxNumberKey(index);
- // If a value has not been initialized we allow writing to it even if it
- // is read-only (a declared const that has not been initialized). If a
- // value is being defined we skip attribute checks completely.
if (set_mode == DEFINE_PROPERTY) {
- details =
- PropertyDetails(attributes, FIELD, details.dictionary_index());
+ details = PropertyDetails(attributes, DATA, details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
- } else if (details.IsReadOnly() && !element->IsTheHole()) {
- if (strict_mode == SLOPPY) {
- return isolate->factory()->undefined_value();
- } else {
- Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { number, object };
- THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
- HandleVector(args, 2)),
- Object);
- }
}
+
// Elements of the arguments object in slow mode might be slow aliases.
if (is_arguments && element->IsAliasedArgumentsEntry()) {
Handle<AliasedArgumentsEntry> entry =
@@ -12646,26 +12971,27 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
if (check_prototype) {
bool found;
MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
- object, index, value, &found, strict_mode);
+ object, index, value, &found, language_mode);
if (found) return result;
}
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
if (!object->map()->is_extensible()) {
- if (strict_mode == SLOPPY) {
+ if (is_sloppy(language_mode)) {
return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> name = isolate->factory()->NumberToString(number);
- Handle<Object> args[1] = { name };
- THROW_NEW_ERROR(isolate, NewTypeError("object_not_extensible",
- HandleVector(args, 1)),
+ Handle<Object> args[] = {name};
+ THROW_NEW_ERROR(isolate,
+ NewTypeError("object_not_extensible",
+ HandleVector(args, arraysize(args))),
Object);
}
}
- PropertyDetails details(attributes, FIELD, 0);
+ PropertyDetails details(attributes, DATA, 0);
Handle<SeededNumberDictionary> new_dictionary =
SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
details);
@@ -12717,12 +13043,11 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
return value;
}
-MaybeHandle<Object> JSObject::SetFastDoubleElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictMode strict_mode,
- bool check_prototype) {
+MaybeHandle<Object> JSObject::SetFastDoubleElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ bool check_prototype) {
DCHECK(object->HasFastDoubleElements());
Handle<FixedArrayBase> base_elms(FixedArrayBase::cast(object->elements()));
@@ -12735,7 +13060,7 @@ MaybeHandle<Object> JSObject::SetFastDoubleElement(
Handle<FixedDoubleArray>::cast(base_elms)->is_the_hole(index))) {
bool found;
MaybeHandle<Object> result = SetElementWithCallbackSetterInPrototypes(
- object, index, value, &found, strict_mode);
+ object, index, value, &found, language_mode);
if (found) return result;
}
@@ -12757,7 +13082,7 @@ MaybeHandle<Object> JSObject::SetFastDoubleElement(
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
object->GetIsolate(), result,
- SetFastElement(object, index, value, strict_mode, check_prototype),
+ SetFastElement(object, index, value, language_mode, check_prototype),
Object);
JSObject::ValidateElements(object);
return result;
@@ -12812,38 +13137,38 @@ MaybeHandle<Object> JSObject::SetFastDoubleElement(
NormalizeElements(object);
DCHECK(object->HasDictionaryElements());
- return SetElement(object, index, value, NONE, strict_mode, check_prototype);
+ return SetElement(object, index, value, NONE, language_mode, check_prototype);
}
MaybeHandle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
- uint32_t index,
- Handle<Object> value,
+ uint32_t index, Handle<Object> value,
PropertyAttributes attributes,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
if (object->IsJSProxy()) {
- return JSProxy::SetElementWithHandler(
- Handle<JSProxy>::cast(object), object, index, value, strict_mode);
+ return JSProxy::SetElementWithHandler(Handle<JSProxy>::cast(object), object,
+ index, value, language_mode);
}
- return JSObject::SetElement(
- Handle<JSObject>::cast(object), index, value, attributes, strict_mode);
+ return JSObject::SetElement(Handle<JSObject>::cast(object), index, value,
+ attributes, language_mode);
}
MaybeHandle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictMode strict_mode) {
+ PropertyAttributes attributes,
+ LanguageMode language_mode) {
DCHECK(!object->HasExternalArrayElements());
- return JSObject::SetElement(object, index, value, NONE, strict_mode, false);
+ return JSObject::SetElement(object, index, value, attributes, language_mode,
+ false);
}
MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
+ uint32_t index, Handle<Object> value,
PropertyAttributes attributes,
- StrictMode strict_mode,
+ LanguageMode language_mode,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
@@ -12872,7 +13197,7 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return SetElement(
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index,
- value, attributes, strict_mode, check_prototype, set_mode);
+ value, attributes, language_mode, check_prototype, set_mode);
}
// Don't allow element properties to be redefined for external arrays.
@@ -12895,10 +13220,12 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
if (!object->map()->is_observed()) {
return object->HasIndexedInterceptor()
- ? SetElementWithInterceptor(object, index, value, attributes,
- strict_mode, check_prototype, set_mode)
- : SetElementWithoutInterceptor(object, index, value, attributes,
- strict_mode, check_prototype, set_mode);
+ ? SetElementWithInterceptor(object, index, value, attributes,
+ language_mode, check_prototype,
+ set_mode)
+ : SetElementWithoutInterceptor(object, index, value, attributes,
+ language_mode, check_prototype,
+ set_mode);
}
Maybe<PropertyAttributes> maybe =
@@ -12925,12 +13252,11 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
object->HasIndexedInterceptor()
- ? SetElementWithInterceptor(
- object, index, value, attributes,
- strict_mode, check_prototype, set_mode)
- : SetElementWithoutInterceptor(
- object, index, value, attributes,
- strict_mode, check_prototype, set_mode),
+ ? SetElementWithInterceptor(object, index, value, attributes,
+ language_mode, check_prototype, set_mode)
+ : SetElementWithoutInterceptor(object, index, value, attributes,
+ language_mode, check_prototype,
+ set_mode),
Object);
Handle<String> name = isolate->factory()->Uint32ToString(index);
@@ -12995,13 +13321,9 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
MaybeHandle<Object> JSObject::SetElementWithoutInterceptor(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode,
+ bool check_prototype, SetPropertyMode set_mode) {
DCHECK(object->HasDictionaryElements() ||
object->HasDictionaryArgumentsElements() ||
(attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
@@ -13018,7 +13340,7 @@ MaybeHandle<Object> JSObject::SetElementWithoutInterceptor(
}
if (object->IsJSArray() && JSArray::WouldChangeReadOnlyLength(
Handle<JSArray>::cast(object), index)) {
- if (strict_mode == SLOPPY) {
+ if (is_sloppy(language_mode)) {
return value;
} else {
return JSArray::ReadOnlyLengthError(Handle<JSArray>::cast(object));
@@ -13029,10 +13351,11 @@ MaybeHandle<Object> JSObject::SetElementWithoutInterceptor(
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- return SetFastElement(object, index, value, strict_mode, check_prototype);
+ return SetFastElement(object, index, value, language_mode,
+ check_prototype);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- return SetFastDoubleElement(object, index, value, strict_mode,
+ return SetFastDoubleElement(object, index, value, language_mode,
check_prototype);
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -13052,9 +13375,8 @@ MaybeHandle<Object> JSObject::SetElementWithoutInterceptor(
#undef TYPED_ARRAY_CASE
case DICTIONARY_ELEMENTS:
- return SetDictionaryElement(object, index, value, attributes, strict_mode,
- check_prototype,
- set_mode);
+ return SetDictionaryElement(object, index, value, attributes,
+ language_mode, check_prototype, set_mode);
case SLOPPY_ARGUMENTS_ELEMENTS: {
Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
uint32_t length = parameter_map->length();
@@ -13078,11 +13400,9 @@ MaybeHandle<Object> JSObject::SetElementWithoutInterceptor(
Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
if (arguments->IsDictionary()) {
return SetDictionaryElement(object, index, value, attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ language_mode, check_prototype, set_mode);
} else {
- return SetFastElement(object, index, value, strict_mode,
+ return SetFastElement(object, index, value, language_mode,
check_prototype);
}
}
@@ -13207,7 +13527,7 @@ void AllocationSite::AddDependentCompilationInfo(
CompilationInfo* info) {
Handle<DependentCode> dep(site->dependent_code());
Handle<DependentCode> codes =
- DependentCode::Insert(dep, group, info->object_wrapper());
+ DependentCode::InsertCompilationInfo(dep, group, info->object_wrapper());
if (*codes != site->dependent_code()) site->set_dependent_code(*codes);
info->dependencies(group)->Add(Handle<HeapObject>(*site), info->zone());
}
@@ -13344,16 +13664,6 @@ void JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
}
-bool JSArray::IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
- Isolate* isolate = jsarray_map->GetIsolate();
- DCHECK(!jsarray_map->is_dictionary_map());
- LookupResult lookup(isolate);
- Handle<Name> length_string = isolate->factory()->length_string();
- jsarray_map->LookupDescriptor(NULL, *length_string, &lookup);
- return lookup.IsReadOnly();
-}
-
-
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -13376,17 +13686,17 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) {
Isolate* isolate = array->GetIsolate();
Handle<Name> length = isolate->factory()->length_string();
- Handle<Object> args[2] = { length, array };
+ Handle<Object> args[] = {length, array};
THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
HandleVector(args, arraysize(args))),
Object);
}
-MaybeHandle<Object> JSObject::GetElementWithInterceptor(
- Handle<JSObject> object,
- Handle<Object> receiver,
- uint32_t index) {
+MaybeHandle<Object> JSObject::GetElementWithInterceptor(Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index,
+ bool check_prototype) {
Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing
@@ -13411,6 +13721,8 @@ MaybeHandle<Object> JSObject::GetElementWithInterceptor(
}
}
+ if (!check_prototype) return MaybeHandle<Object>();
+
ElementsAccessor* handler = object->GetElementsAccessor();
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -13713,10 +14025,8 @@ MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor(
result = args.Call(enum_fun);
}
if (result.IsEmpty()) return MaybeHandle<JSObject>();
-#if ENABLE_EXTRA_CHECKS
- CHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
- v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
-#endif
+ DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
+ v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
// Rebox before returning.
return handle(*v8::Utils::OpenHandle(*result), isolate);
}
@@ -13738,10 +14048,8 @@ MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor(
result = args.Call(enum_fun);
}
if (result.IsEmpty()) return MaybeHandle<JSObject>();
-#if ENABLE_EXTRA_CHECKS
- CHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
- v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
-#endif
+ DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
+ v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
// Rebox before returning.
return handle(*v8::Utils::OpenHandle(*result), isolate);
}
@@ -14123,14 +14431,12 @@ void Symbol::SymbolShortPrint(std::ostream& os) {
// StringSharedKeys are used as keys in the eval cache.
class StringSharedKey : public HashTableKey {
public:
- StringSharedKey(Handle<String> source,
- Handle<SharedFunctionInfo> shared,
- StrictMode strict_mode,
- int scope_position)
+ StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
+ LanguageMode language_mode, int scope_position)
: source_(source),
shared_(shared),
- strict_mode_(strict_mode),
- scope_position_(scope_position) { }
+ language_mode_(language_mode),
+ scope_position_(scope_position) {}
bool IsMatch(Object* other) OVERRIDE {
DisallowHeapAllocation no_allocation;
@@ -14142,10 +14448,10 @@ class StringSharedKey : public HashTableKey {
FixedArray* other_array = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
if (shared != *shared_) return false;
- int strict_unchecked = Smi::cast(other_array->get(2))->value();
- DCHECK(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
- StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
- if (strict_mode != strict_mode_) return false;
+ int language_unchecked = Smi::cast(other_array->get(2))->value();
+ DCHECK(is_valid_language_mode(language_unchecked));
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ if (language_mode != language_mode_) return false;
int scope_position = Smi::cast(other_array->get(3))->value();
if (scope_position != scope_position_) return false;
String* source = String::cast(other_array->get(1));
@@ -14154,7 +14460,7 @@ class StringSharedKey : public HashTableKey {
static uint32_t StringSharedHashHelper(String* source,
SharedFunctionInfo* shared,
- StrictMode strict_mode,
+ LanguageMode language_mode,
int scope_position) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
@@ -14165,14 +14471,16 @@ class StringSharedKey : public HashTableKey {
// collection.
Script* script(Script::cast(shared->script()));
hash ^= String::cast(script->source())->Hash();
- if (strict_mode == STRICT) hash ^= 0x8000;
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ if (is_strict(language_mode)) hash ^= 0x8000;
+ if (is_strong(language_mode)) hash ^= 0x10000;
hash += scope_position;
}
return hash;
}
uint32_t Hash() OVERRIDE {
- return StringSharedHashHelper(*source_, *shared_, strict_mode_,
+ return StringSharedHashHelper(*source_, *shared_, language_mode_,
scope_position_);
}
@@ -14184,12 +14492,12 @@ class StringSharedKey : public HashTableKey {
FixedArray* other_array = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
String* source = String::cast(other_array->get(1));
- int strict_unchecked = Smi::cast(other_array->get(2))->value();
- DCHECK(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
- StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
+ int language_unchecked = Smi::cast(other_array->get(2))->value();
+ DCHECK(is_valid_language_mode(language_unchecked));
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
int scope_position = Smi::cast(other_array->get(3))->value();
- return StringSharedHashHelper(
- source, shared, strict_mode, scope_position);
+ return StringSharedHashHelper(source, shared, language_mode,
+ scope_position);
}
@@ -14197,7 +14505,7 @@ class StringSharedKey : public HashTableKey {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
array->set(0, *shared_);
array->set(1, *source_);
- array->set(2, Smi::FromInt(strict_mode_));
+ array->set(2, Smi::FromInt(language_mode_));
array->set(3, Smi::FromInt(scope_position_));
return array;
}
@@ -14205,7 +14513,7 @@ class StringSharedKey : public HashTableKey {
private:
Handle<String> source_;
Handle<SharedFunctionInfo> shared_;
- StrictMode strict_mode_;
+ LanguageMode language_mode_;
int scope_position_;
};
@@ -14645,11 +14953,11 @@ Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
template Handle<Object>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::DeleteProperty(
- Handle<NameDictionary>, int, JSObject::DeleteMode);
+ Handle<NameDictionary>, int);
template Handle<Object>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- DeleteProperty(Handle<SeededNumberDictionary>, int, JSObject::DeleteMode);
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+ uint32_t>::DeleteProperty(Handle<SeededNumberDictionary>, int);
template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
@@ -14762,7 +15070,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
HandleScope scope(isolate);
Handle<Object> value(dict->ValueAt(i), isolate);
PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == CALLBACKS || details.IsReadOnly()) {
+ if (details.type() == ACCESSOR_CONSTANT || details.IsReadOnly()) {
// Bail out and do the sorting of undefineds and array holes in JS.
// Also bail out if the element is not supposed to be moved.
return bailout;
@@ -14796,7 +15104,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
}
uint32_t result = pos;
- PropertyDetails no_details(NONE, FIELD, 0);
+ PropertyDetails no_details(NONE, DATA, 0);
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
@@ -15116,7 +15424,7 @@ Handle<Object> ExternalFloat32Array::SetValue(
Handle<ExternalFloat32Array> array,
uint32_t index,
Handle<Object> value) {
- float cast_value = static_cast<float>(base::OS::nan_value());
+ float cast_value = std::numeric_limits<float>::quiet_NaN();
if (index < static_cast<uint32_t>(array->length())) {
if (value->IsSmi()) {
int int_value = Handle<Smi>::cast(value)->value();
@@ -15139,7 +15447,7 @@ Handle<Object> ExternalFloat64Array::SetValue(
Handle<ExternalFloat64Array> array,
uint32_t index,
Handle<Object> value) {
- double double_value = base::OS::nan_value();
+ double double_value = std::numeric_limits<double>::quiet_NaN();
if (index < static_cast<uint32_t>(array->length())) {
if (value->IsNumber()) {
double_value = value->Number();
@@ -15173,16 +15481,14 @@ void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
}
-Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
- Handle<JSGlobalObject> global,
- Handle<Name> name) {
+Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
+ Handle<GlobalObject> global, Handle<Name> name) {
DCHECK(!global->HasFastProperties());
int entry = global->property_dictionary()->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
Isolate* isolate = global->GetIsolate();
- Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(
- isolate->factory()->the_hole_value());
- PropertyDetails details(NONE, FIELD, 0);
+ Handle<PropertyCell> cell = isolate->factory()->NewPropertyCellWithHole();
+ PropertyDetails details(NONE, DATA, 0);
details = details.AsDeleted();
Handle<NameDictionary> dictionary = NameDictionary::Add(
handle(global->property_dictionary()), name, cell, details);
@@ -15349,11 +15655,11 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
- Handle<Context> context) {
+ Handle<Context> context,
+ LanguageMode language_mode) {
Isolate* isolate = GetIsolate();
Handle<SharedFunctionInfo> shared(context->closure()->shared());
- StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY,
- RelocInfo::kNoPosition);
+ StringSharedKey key(src, shared, language_mode, RelocInfo::kNoPosition);
int entry = FindEntry(&key);
if (entry == kNotFound) return isolate->factory()->undefined_value();
int index = EntryToIndex(entry);
@@ -15364,11 +15670,11 @@ Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
Handle<Object> CompilationCacheTable::LookupEval(
Handle<String> src, Handle<SharedFunctionInfo> outer_info,
- StrictMode strict_mode, int scope_position) {
+ LanguageMode language_mode, int scope_position) {
Isolate* isolate = GetIsolate();
// Cache key is the tuple (source, outer shared function info, scope position)
// to unambiguously identify the context chain the cached eval code assumes.
- StringSharedKey key(src, outer_info, strict_mode, scope_position);
+ StringSharedKey key(src, outer_info, language_mode, scope_position);
int entry = FindEntry(&key);
if (entry == kNotFound) return isolate->factory()->undefined_value();
int index = EntryToIndex(entry);
@@ -15390,11 +15696,10 @@ Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
Handle<CompilationCacheTable> CompilationCacheTable::Put(
Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> context, Handle<Object> value) {
+ Handle<Context> context, LanguageMode language_mode, Handle<Object> value) {
Isolate* isolate = cache->GetIsolate();
Handle<SharedFunctionInfo> shared(context->closure()->shared());
- StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY,
- RelocInfo::kNoPosition);
+ StringSharedKey key(src, shared, language_mode, RelocInfo::kNoPosition);
{
Handle<Object> k = key.AsHandle(isolate);
DisallowHeapAllocation no_allocation_scope;
@@ -15422,7 +15727,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
int scope_position) {
Isolate* isolate = cache->GetIsolate();
- StringSharedKey key(src, outer_info, value->strict_mode(), scope_position);
+ StringSharedKey key(src, outer_info, value->language_mode(), scope_position);
{
Handle<Object> k = key.AsHandle(isolate);
DisallowHeapAllocation no_allocation_scope;
@@ -15627,17 +15932,12 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity(
}
-template<typename Derived, typename Shape, typename Key>
+template <typename Derived, typename Shape, typename Key>
Handle<Object> Dictionary<Derived, Shape, Key>::DeleteProperty(
- Handle<Derived> dictionary,
- int entry,
- JSObject::DeleteMode mode) {
+ Handle<Derived> dictionary, int entry) {
Factory* factory = dictionary->GetIsolate()->factory();
PropertyDetails details = dictionary->DetailsAt(entry);
- // Ignore attributes if forcing a deletion.
- if (!details.IsConfigurable() && mode != JSReceiver::FORCE_DELETION) {
- return factory->false_value();
- }
+ if (!details.IsConfigurable()) return factory->false_value();
dictionary->SetEntry(
entry, factory->the_hole_value(), factory->the_hole_value());
@@ -15662,7 +15962,7 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::AtPut(
#ifdef DEBUG
USE(Shape::AsHandle(dictionary->GetIsolate(), key));
#endif
- PropertyDetails details(NONE, FIELD, 0);
+ PropertyDetails details(NONE, DATA, 0);
AddEntry(dictionary, key, value, details, dictionary->Hash(key));
return dictionary;
@@ -15750,7 +16050,7 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry(
uint32_t key,
Handle<Object> value) {
SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
- return Add(dictionary, key, value, PropertyDetails(NONE, FIELD, 0));
+ return Add(dictionary, key, value, PropertyDetails(NONE, DATA, 0));
}
@@ -15838,7 +16138,7 @@ bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
if (DerivedHashTable::IsKey(k) && !FilterKey(k, NONE)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
- if (details.type() == CALLBACKS) return true;
+ if (details.type() == ACCESSOR_CONSTANT) return true;
PropertyAttributes attr = details.attributes();
if (attr & (READ_ONLY | DONT_DELETE | DONT_ENUM)) return true;
}
@@ -16029,7 +16329,7 @@ void ObjectHashTable::RemoveEntry(int entry) {
}
-Object* WeakHashTable::Lookup(Handle<Object> key) {
+Object* WeakHashTable::Lookup(Handle<HeapObject> key) {
DisallowHeapAllocation no_gc;
DCHECK(IsKey(*key));
int entry = FindEntry(key);
@@ -16039,36 +16339,31 @@ Object* WeakHashTable::Lookup(Handle<Object> key) {
Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
- Handle<Object> key,
- Handle<Object> value) {
+ Handle<HeapObject> key,
+ Handle<HeapObject> value) {
DCHECK(table->IsKey(*key));
int entry = table->FindEntry(key);
// Key is already in table, just overwrite value.
if (entry != kNotFound) {
- // TODO(ulan): Skipping write barrier is a temporary solution to avoid
- // memory leaks. Remove this once we have special visitor for weak fixed
- // arrays.
- table->set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER);
+ table->set(EntryToValueIndex(entry), *value);
return table;
}
+ Handle<WeakCell> key_cell = key->GetIsolate()->factory()->NewWeakCell(key);
+
// Check whether the hash table should be extended.
table = EnsureCapacity(table, 1, key, TENURED);
- table->AddEntry(table->FindInsertionEntry(table->Hash(key)), key, value);
+ table->AddEntry(table->FindInsertionEntry(table->Hash(key)), key_cell, value);
return table;
}
-void WeakHashTable::AddEntry(int entry,
- Handle<Object> key,
- Handle<Object> value) {
+void WeakHashTable::AddEntry(int entry, Handle<WeakCell> key_cell,
+ Handle<HeapObject> value) {
DisallowHeapAllocation no_allocation;
- // TODO(ulan): Skipping write barrier is a temporary solution to avoid
- // memory leaks. Remove this once we have special visitor for weak fixed
- // arrays.
- set(EntryToIndex(entry), *key, SKIP_WRITE_BARRIER);
- set(EntryToValueIndex(entry), *value, SKIP_WRITE_BARRIER);
+ set(EntryToIndex(entry), *key_cell);
+ set(EntryToValueIndex(entry), *value);
ElementAdded();
}
@@ -16476,58 +16771,6 @@ template void
OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Transition();
-DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator(
- DeclaredAccessorDescriptor* descriptor)
- : array_(descriptor->serialized_data()->GetDataStartAddress()),
- length_(descriptor->serialized_data()->length()),
- offset_(0) {
-}
-
-
-const DeclaredAccessorDescriptorData*
- DeclaredAccessorDescriptorIterator::Next() {
- DCHECK(offset_ < length_);
- uint8_t* ptr = &array_[offset_];
- DCHECK(reinterpret_cast<uintptr_t>(ptr) % sizeof(uintptr_t) == 0);
- const DeclaredAccessorDescriptorData* data =
- reinterpret_cast<const DeclaredAccessorDescriptorData*>(ptr);
- offset_ += sizeof(*data);
- DCHECK(offset_ <= length_);
- return data;
-}
-
-
-Handle<DeclaredAccessorDescriptor> DeclaredAccessorDescriptor::Create(
- Isolate* isolate,
- const DeclaredAccessorDescriptorData& descriptor,
- Handle<DeclaredAccessorDescriptor> previous) {
- int previous_length =
- previous.is_null() ? 0 : previous->serialized_data()->length();
- int length = sizeof(descriptor) + previous_length;
- Handle<ByteArray> serialized_descriptor =
- isolate->factory()->NewByteArray(length);
- Handle<DeclaredAccessorDescriptor> value =
- isolate->factory()->NewDeclaredAccessorDescriptor();
- value->set_serialized_data(*serialized_descriptor);
- // Copy in the data.
- {
- DisallowHeapAllocation no_allocation;
- uint8_t* array = serialized_descriptor->GetDataStartAddress();
- if (previous_length != 0) {
- uint8_t* previous_array =
- previous->serialized_data()->GetDataStartAddress();
- MemCopy(array, previous_array, previous_length);
- array += previous_length;
- }
- DCHECK(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0);
- DeclaredAccessorDescriptorData* data =
- reinterpret_cast<DeclaredAccessorDescriptorData*>(array);
- *data = descriptor;
- }
- return value;
-}
-
-
// Check if there is a break point at this code position.
bool DebugInfo::HasBreakPoint(int code_position) {
// Get the break point info object for this code position.
@@ -17037,10 +17280,12 @@ Handle<Object> PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
const int kMaxLengthForInternalization = 200;
if ((cell->type()->Is(HeapType::None()) ||
cell->type()->Is(HeapType::Undefined())) &&
- value->IsString() &&
- Handle<String>::cast(value)->length() <= kMaxLengthForInternalization) {
- value = cell->GetIsolate()->factory()->InternalizeString(
- Handle<String>::cast(value));
+ value->IsString()) {
+ auto string = Handle<String>::cast(value);
+ if (string->length() <= kMaxLengthForInternalization &&
+ !string->map()->is_undetectable()) {
+ value = cell->GetIsolate()->factory()->InternalizeString(string);
+ }
}
cell->set_value(*value);
if (!HeapType::Any()->Is(cell->type())) {
@@ -17054,10 +17299,9 @@ Handle<Object> PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
// static
void PropertyCell::AddDependentCompilationInfo(Handle<PropertyCell> cell,
CompilationInfo* info) {
- Handle<DependentCode> codes =
- DependentCode::Insert(handle(cell->dependent_code(), info->isolate()),
- DependentCode::kPropertyCellChangedGroup,
- info->object_wrapper());
+ Handle<DependentCode> codes = DependentCode::InsertCompilationInfo(
+ handle(cell->dependent_code(), info->isolate()),
+ DependentCode::kPropertyCellChangedGroup, info->object_wrapper());
if (*codes != cell->dependent_code()) cell->set_dependent_code(*codes);
info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add(
cell, info->zone());
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index f2f2e495b4..d4af4a6255 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -31,6 +31,8 @@
#include "src/mips/constants-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/constants-ppc.h" // NOLINT
#endif
@@ -128,9 +130,7 @@
// - SharedFunctionInfo
// - Struct
// - Box
-// - DeclaredAccessorDescriptor
// - AccessorInfo
-// - DeclaredAccessorInfo
// - ExecutableAccessorInfo
// - AccessorPair
// - AccessCheckInfo
@@ -140,7 +140,6 @@
// - FunctionTemplateInfo
// - ObjectTemplateInfo
// - Script
-// - SignatureInfo
// - TypeSwitchInfo
// - DebugInfo
// - BreakPointInfo
@@ -243,7 +242,7 @@ enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
// Indicates whether a value can be loaded as a constant.
-enum StoreMode { ALLOW_IN_DESCRIPTOR, FORCE_IN_OBJECT };
+enum StoreMode { ALLOW_IN_DESCRIPTOR, FORCE_FIELD };
// PropertyNormalizationMode is used to specify whether to keep
@@ -520,10 +519,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// manually.
#define STRUCT_LIST(V) \
V(BOX, Box, box) \
- V(DECLARED_ACCESSOR_DESCRIPTOR, \
- DeclaredAccessorDescriptor, \
- declared_accessor_descriptor) \
- V(DECLARED_ACCESSOR_INFO, DeclaredAccessorInfo, declared_accessor_info) \
V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, executable_accessor_info)\
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
@@ -531,7 +526,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(SIGNATURE_INFO, SignatureInfo, signature_info) \
V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
V(ALLOCATION_SITE, AllocationSite, allocation_site) \
@@ -856,6 +850,7 @@ class ConsString;
class DictionaryElementsAccessor;
class ElementsAccessor;
class FixedArrayBase;
+class FunctionLiteral;
class GlobalObject;
class LayoutDescriptor;
class LookupIterator;
@@ -863,6 +858,7 @@ class ObjectVisitor;
class StringStream;
class TypeFeedbackVector;
class WeakCell;
+
// We cannot just say "class HeapType;" if it is created from a template... =8-?
template<class> class TypeImpl;
struct HeapTypeConfig;
@@ -1021,8 +1017,6 @@ class Object {
CERTAINLY_NOT_STORE_FROM_KEYED
};
- enum StorePropertyMode { NORMAL_PROPERTY, SUPER_PROPERTY };
-
INLINE(bool IsFixedArrayBase() const);
INLINE(bool IsExternal() const);
INLINE(bool IsAccessorInfo() const);
@@ -1119,7 +1113,7 @@ class Object {
Handle<Context> context);
// Converts this to a Smi if possible.
- static MUST_USE_RESULT inline MaybeHandle<Smi> ToSmi(Isolate* isolate,
+ MUST_USE_RESULT static inline MaybeHandle<Smi> ToSmi(Isolate* isolate,
Handle<Object> object);
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
@@ -1127,23 +1121,33 @@ class Object {
// Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
Handle<Object> object, Handle<Name> key, Handle<Object> value,
- StrictMode strict_mode,
+ LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
- LookupIterator* it, Handle<Object> value, StrictMode strict_mode,
- StoreFromKeyed store_mode,
- StorePropertyMode data_store_mode = NORMAL_PROPERTY);
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
+ StoreFromKeyed store_mode);
+
+ MUST_USE_RESULT static MaybeHandle<Object> SetSuperProperty(
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
+ StoreFromKeyed store_mode);
+
MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
- LookupIterator* it, Handle<Object> value, StrictMode strict_mode);
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
+ Isolate* isolate, Handle<Object> reciever, Handle<Object> name,
+ Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyElement(
Isolate* isolate, Handle<Object> receiver, uint32_t index,
- Handle<Object> value, StrictMode strict_mode);
+ Handle<Object> value, LanguageMode language_mode);
+ MUST_USE_RESULT static MaybeHandle<Object> RedefineNonconfigurableProperty(
+ Isolate* isolate, Handle<Object> name, Handle<Object> value,
+ LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetDataProperty(
LookupIterator* it, Handle<Object> value);
MUST_USE_RESULT static MaybeHandle<Object> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- StrictMode strict_mode, StoreFromKeyed store_mode);
+ LanguageMode language_mode, StoreFromKeyed store_mode);
MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> object,
Handle<Name> key);
@@ -1163,7 +1167,7 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithAccessor(
Handle<Object> receiver, Handle<Name> name, Handle<Object> value,
Handle<JSObject> holder, Handle<Object> structure,
- StrictMode strict_mode);
+ LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
Handle<Object> receiver,
@@ -1186,7 +1190,7 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> SetElementWithReceiver(
Isolate* isolate, Handle<Object> object, Handle<Object> receiver,
- uint32_t index, Handle<Object> value, StrictMode strict_mode);
+ uint32_t index, Handle<Object> value, LanguageMode language_mode);
static inline Handle<Object> GetPrototypeSkipHiddenPrototypes(
Isolate* isolate, Handle<Object> receiver);
@@ -1258,6 +1262,11 @@ class Object {
// Return the map of the root of object's prototype chain.
Map* GetRootMap(Isolate* isolate);
+ // Helper for SetProperty and SetSuperProperty.
+ MUST_USE_RESULT static MaybeHandle<Object> SetPropertyInternal(
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
+ StoreFromKeyed store_mode, bool* found);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
@@ -1595,20 +1604,11 @@ enum AccessorComponent {
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
public:
- enum DeleteMode {
- NORMAL_DELETION,
- STRICT_DELETION,
- FORCE_DELETION
- };
-
DECLARE_CAST(JSReceiver)
MUST_USE_RESULT static MaybeHandle<Object> SetElement(
- Handle<JSReceiver> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode);
+ Handle<JSReceiver> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
@@ -1622,13 +1622,11 @@ class JSReceiver: public HeapObject {
// Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
- Handle<JSReceiver> object,
- Handle<Name> name,
- DeleteMode mode = NORMAL_DELETION);
+ Handle<JSReceiver> object, Handle<Name> name,
+ LanguageMode language_mode = SLOPPY);
MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
- Handle<JSReceiver> object,
- uint32_t index,
- DeleteMode mode = NORMAL_DELETION);
+ Handle<JSReceiver> object, uint32_t index,
+ LanguageMode language_mode = SLOPPY);
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -1946,34 +1944,28 @@ class JSObject: public JSReceiver {
Handle<JSObject> object, uint32_t index);
MUST_USE_RESULT static MaybeHandle<Object> SetFastElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictMode strict_mode,
- bool check_prototype);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ LanguageMode language_mode, bool check_prototype);
+
+ MUST_USE_RESULT static inline MaybeHandle<Object> SetOwnElement(
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetOwnElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictMode strict_mode);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode);
// Empty handle is returned if the element cannot be set to the given value.
MUST_USE_RESULT static MaybeHandle<Object> SetElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode,
- bool check_prototype = true,
- SetPropertyMode set_mode = SET_PROPERTY);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode,
+ bool check_prototype = true, SetPropertyMode set_mode = SET_PROPERTY);
// Returns the index'th element.
// The undefined object if index is out of bounds.
MUST_USE_RESULT static MaybeHandle<Object> GetElementWithInterceptor(
- Handle<JSObject> object,
- Handle<Object> receiver,
- uint32_t index);
+ Handle<JSObject> object, Handle<Object> receiver, uint32_t index,
+ bool check_prototype);
enum SetFastElementsCapacitySmiMode {
kAllowSmiElements,
@@ -2298,47 +2290,38 @@ class JSObject: public JSReceiver {
bool continue_search);
MUST_USE_RESULT static MaybeHandle<Object> SetElementWithCallback(
Handle<Object> object, Handle<Object> structure, uint32_t index,
- Handle<Object> value, Handle<JSObject> holder, StrictMode strict_mode);
+ Handle<Object> value, Handle<JSObject> holder,
+ LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetElementWithInterceptor(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode,
+ bool check_prototype, SetPropertyMode set_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetElementWithoutInterceptor(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode,
+ bool check_prototype, SetPropertyMode set_mode);
MUST_USE_RESULT
static MaybeHandle<Object> SetElementWithCallbackSetterInPrototypes(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- bool* found,
- StrictMode strict_mode);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ bool* found, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetDictionaryElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictMode strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode = SET_PROPERTY);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, LanguageMode language_mode,
+ bool check_prototype, SetPropertyMode set_mode = SET_PROPERTY);
MUST_USE_RESULT static MaybeHandle<Object> SetFastDoubleElement(
- Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictMode strict_mode,
- bool check_prototype = true);
+ Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ LanguageMode language_mode, bool check_prototype = true);
+ MUST_USE_RESULT static MaybeHandle<Object> GetElementWithFailedAccessCheck(
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> receiver,
+ uint32_t index);
+ MUST_USE_RESULT static Maybe<PropertyAttributes>
+ GetElementAttributesWithFailedAccessCheck(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index);
MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value, StrictMode strict_mode);
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
@@ -2347,21 +2330,16 @@ class JSObject: public JSReceiver {
PropertyAttributes attributes);
MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
- Handle<JSObject> object,
- Handle<Name> name,
- DeleteMode mode);
+ Handle<JSObject> object, Handle<Name> name, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor(
Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name);
- // Deletes the named property in a normalized object.
- static Handle<Object> DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name,
- DeleteMode mode);
+ // Deletes an existing named property in a normalized object.
+ static void DeleteNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name);
MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
- Handle<JSObject> object,
- uint32_t index,
- DeleteMode mode);
+ Handle<JSObject> object, uint32_t index, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithInterceptor(
Handle<JSObject> object,
uint32_t index);
@@ -2571,7 +2549,7 @@ class FixedDoubleArray: public FixedArrayBase {
public:
// Setter and getter for elements.
inline double get_scalar(int index);
- inline int64_t get_representation(int index);
+ inline uint64_t get_representation(int index);
static inline Handle<Object> get(Handle<FixedDoubleArray> array, int index);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -2592,10 +2570,6 @@ class FixedDoubleArray: public FixedArrayBase {
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
- inline static bool is_the_hole_nan(double value);
- inline static double hole_nan_as_double();
- inline static double canonical_not_the_hole_nan_as_double();
-
DECLARE_CAST(FixedDoubleArray)
// Maximal allowed size, in bytes, of a single FixedDoubleArray.
@@ -3562,10 +3536,7 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
void CopyValuesTo(FixedArray* elements);
// Delete a property from the dictionary.
- static Handle<Object> DeleteProperty(
- Handle<Derived> dictionary,
- int entry,
- JSObject::DeleteMode mode);
+ static Handle<Object> DeleteProperty(Handle<Derived> dictionary, int entry);
// Attempt to shrink the dictionary after deletion of key.
MUST_USE_RESULT static inline Handle<Derived> Shrink(
@@ -4091,9 +4062,9 @@ class WeakHashTableShape : public BaseShape<Handle<Object> > {
};
-// WeakHashTable maps keys that are arbitrary objects to object values.
-// It is used for the global weak hash table that maps objects
-// embedded in optimized code to dependent code lists.
+// WeakHashTable maps keys that are arbitrary heap objects to heap object
+// values. The table wraps the keys in weak cells and store values directly.
+// Thus it references keys weakly and values strongly.
class WeakHashTable: public HashTable<WeakHashTable,
WeakHashTableShape<2>,
Handle<Object> > {
@@ -4104,27 +4075,18 @@ class WeakHashTable: public HashTable<WeakHashTable,
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
- Object* Lookup(Handle<Object> key);
+ Object* Lookup(Handle<HeapObject> key);
// Adds (or overwrites) the value associated with the given key. Mapping a
// key to the hole value causes removal of the whole entry.
MUST_USE_RESULT static Handle<WeakHashTable> Put(Handle<WeakHashTable> table,
- Handle<Object> key,
- Handle<Object> value);
-
- // This function is called when heap verification is turned on.
- void Zap(Object* value) {
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- set(EntryToIndex(i), value);
- set(EntryToValueIndex(i), value);
- }
- }
+ Handle<HeapObject> key,
+ Handle<HeapObject> value);
private:
friend class MarkCompactCollector;
- void AddEntry(int entry, Handle<Object> key, Handle<Object> value);
+ void AddEntry(int entry, Handle<WeakCell> key, Handle<HeapObject> value);
// Returns the index to the value of an entry.
static inline int EntryToValueIndex(int entry) {
@@ -4185,11 +4147,11 @@ class ScopeInfo : public FixedArray {
// Does this scope call eval?
bool CallsEval();
- // Return the strict mode of this scope.
- StrictMode strict_mode();
+ // Return the language mode of this scope.
+ LanguageMode language_mode();
// Does this scope make a sloppy eval call?
- bool CallsSloppyEval() { return CallsEval() && strict_mode() == SLOPPY; }
+ bool CallsSloppyEval() { return CallsEval() && is_sloppy(language_mode()); }
// Return the total number of locals allocated on the stack and in the
// context. This includes the parameters that are allocated in the context.
@@ -4225,6 +4187,10 @@ class ScopeInfo : public FixedArray {
// Return if this is a nested function within an asm module scope.
bool IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
+ bool IsSimpleParameterList() {
+ return IsSimpleParameterListField::decode(Flags());
+ }
+
// Return the function_name if present.
String* FunctionName();
@@ -4286,7 +4252,7 @@ class ScopeInfo : public FixedArray {
Handle<JSObject> scope_object);
- static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
+ static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope);
// Serializes empty scope info.
static ScopeInfo* Empty(Isolate* isolate);
@@ -4376,11 +4342,14 @@ class ScopeInfo : public FixedArray {
// Properties of scopes.
class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
class CallsEvalField : public BitField<bool, 4, 1> {};
- class StrictModeField : public BitField<StrictMode, 5, 1> {};
- class FunctionVariableField : public BitField<FunctionVariableInfo, 6, 2> {};
- class FunctionVariableMode : public BitField<VariableMode, 8, 3> {};
- class AsmModuleField : public BitField<bool, 11, 1> {};
- class AsmFunctionField : public BitField<bool, 12, 1> {};
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ class LanguageModeField : public BitField<LanguageMode, 5, 2> {};
+ class FunctionVariableField : public BitField<FunctionVariableInfo, 7, 2> {};
+ class FunctionVariableMode : public BitField<VariableMode, 9, 3> {};
+ class AsmModuleField : public BitField<bool, 12, 1> {};
+ class AsmFunctionField : public BitField<bool, 13, 1> {};
+ class IsSimpleParameterListField
+ : public BitField<bool, AsmFunctionField::kNext, 1> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
@@ -4474,8 +4443,11 @@ class ByteArray: public FixedArrayBase {
};
-// FreeSpace represents fixed sized areas of the heap that are not currently in
-// use. Used by the heap and GC.
+// FreeSpace are fixed-size free memory blocks used by the heap and GC.
+// They look like heap objects (are heap object tagged and have a map) so that
+// the heap remains iterable. They have a size and a next pointer.
+// The next pointer is the raw address of the next FreeSpace object (or NULL)
+// in the free list.
class FreeSpace: public HeapObject {
public:
// [size]: size of the free space including the header.
@@ -4487,7 +4459,12 @@ class FreeSpace: public HeapObject {
inline int Size() { return size(); }
- DECLARE_CAST(FreeSpace)
+ // Accessors for the next field.
+ inline FreeSpace* next();
+ inline FreeSpace** next_address();
+ inline void set_next(FreeSpace* next);
+
+ inline static FreeSpace* cast(HeapObject* obj);
// Dispatched behavior.
DECLARE_PRINTER(FreeSpace)
@@ -4496,9 +4473,7 @@ class FreeSpace: public HeapObject {
// Layout description.
// Size is smi tagged when it is stored.
static const int kSizeOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kSizeOffset + kPointerSize;
-
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+ static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
@@ -4878,7 +4853,8 @@ class DeoptimizationInputData: public FixedArray {
static const int kOsrPcOffsetIndex = 4;
static const int kOptimizationIdIndex = 5;
static const int kSharedFunctionInfoIndex = 6;
- static const int kFirstDeoptEntryIndex = 7;
+ static const int kWeakCellCacheIndex = 7;
+ static const int kFirstDeoptEntryIndex = 8;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -4903,6 +4879,7 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi)
DEFINE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
+ DEFINE_ELEMENT_ACCESSORS(WeakCellCache, Object)
#undef DEFINE_ELEMENT_ACCESSORS
@@ -5158,6 +5135,11 @@ class Code: public HeapObject {
inline bool is_turbofanned();
inline void set_is_turbofanned(bool value);
+ // [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the
+ // embedded objects in code should be treated weakly.
+ inline bool can_have_weak_objects();
+ inline void set_can_have_weak_objects(bool value);
+
// [optimizable]: For FUNCTION kind, tells if it is optimizable.
inline bool optimizable();
inline void set_optimizable(bool value);
@@ -5269,6 +5251,8 @@ class Code: public HeapObject {
// function replaces the corresponding placeholder in the code with the
// object-to-replace. The function assumes that pairs in the pattern come in
// the same order as the placeholders in the code.
+ // If the placeholder is a weak cell, then the value of weak cell is matched
+ // against the map-to-find.
void FindAndReplace(const FindAndReplacePattern& pattern);
// The entire code object including its header is copied verbatim to the
@@ -5412,18 +5396,26 @@ class Code: public HeapObject {
#endif
#ifdef DEBUG
- void VerifyEmbeddedObjectsInFullCode();
+ enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
+ void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
#endif // DEBUG
- inline bool CanContainWeakObjects() { return is_optimized_code(); }
+ inline bool CanContainWeakObjects() {
+ // is_turbofanned() implies !can_have_weak_objects().
+ DCHECK(!is_optimized_code() || !is_turbofanned() ||
+ !can_have_weak_objects());
+ return is_optimized_code() && can_have_weak_objects();
+ }
inline bool IsWeakObject(Object* object) {
- return (is_optimized_code() && !is_turbofanned() &&
- IsWeakObjectInOptimizedCode(object));
+ return (CanContainWeakObjects() && IsWeakObjectInOptimizedCode(object));
}
static inline bool IsWeakObjectInOptimizedCode(Object* object);
+ static Handle<WeakCell> WeakCellFor(Handle<Code> code);
+ WeakCell* CachedWeakCell();
+
// Max loop nesting marker used to postpose OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -5485,9 +5477,10 @@ class Code: public HeapObject {
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1;
static const int kIsTurbofannedBit = kMarkedForDeoptimizationBit + 1;
+ static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
- STATIC_ASSERT(kIsTurbofannedBit + 1 <= 32);
+ STATIC_ASSERT(kCanHaveWeakObjects + 1 <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
@@ -5497,6 +5490,8 @@ class Code: public HeapObject {
: public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT
class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
}; // NOLINT
+ class CanHaveWeakObjectsField
+ : public BitField<bool, kCanHaveWeakObjects, 1> {}; // NOLINT
// KindSpecificFlags2 layout (ALL)
static const int kIsCrankshaftedBit = 0;
@@ -5565,9 +5560,9 @@ class CompilationInfo;
//
// The first n elements are Smis, each of them specifies the number of codes
// in the corresponding group. The subsequent elements contain grouped code
-// objects. The suffix of the array can be filled with the undefined value if
-// the number of codes is less than the length of the array. The order of the
-// code objects within a group is not preserved.
+// objects in weak cells. The suffix of the array can be filled with the
+// undefined value if the number of codes is less than the length of the
+// array. The order of the code objects within a group is not preserved.
//
// All code indexes used in the class are counted starting from the first
// code object of the first group. In other words, code index 0 corresponds
@@ -5621,15 +5616,21 @@ class DependentCode: public FixedArray {
int start_indexes_[kGroupCount + 1];
};
- bool Contains(DependencyGroup group, Code* code);
- static Handle<DependentCode> Insert(Handle<DependentCode> entries,
- DependencyGroup group,
- Handle<Object> object);
- void UpdateToFinishedCode(DependencyGroup group,
- CompilationInfo* info,
- Code* code);
+ bool Contains(DependencyGroup group, WeakCell* code_cell);
+
+ static Handle<DependentCode> InsertCompilationInfo(
+ Handle<DependentCode> entries, DependencyGroup group,
+ Handle<Foreign> info);
+
+ static Handle<DependentCode> InsertWeakCode(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<WeakCell> code_cell);
+
+ void UpdateToFinishedCode(DependencyGroup group, Foreign* info,
+ WeakCell* code_cell);
+
void RemoveCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info);
+ Foreign* info);
void DeoptimizeDependentCodeGroup(Isolate* isolate,
DependentCode::DependencyGroup group);
@@ -5641,12 +5642,8 @@ class DependentCode: public FixedArray {
// and the mark compact collector.
inline int number_of_entries(DependencyGroup group);
inline void set_number_of_entries(DependencyGroup group, int value);
- inline bool is_code_at(int i);
- inline Code* code_at(int i);
- inline CompilationInfo* compilation_info_at(int i);
- inline void set_object_at(int i, Object* object);
- inline Object** slot_at(int i);
inline Object* object_at(int i);
+ inline void set_object_at(int i, Object* object);
inline void clear_at(int i);
inline void copy(int from, int to);
DECLARE_CAST(DependentCode)
@@ -5658,9 +5655,20 @@ class DependentCode: public FixedArray {
static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
private:
+ static Handle<DependentCode> Insert(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<Object> object);
+ static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
// Make a room at the end of the given group by moving out the first
// code objects of the subsequent groups.
inline void ExtendGroup(DependencyGroup group);
+ // Compact by removing cleared weak cells and return true if there was
+ // any cleared weak cell.
+ bool Compact();
+ static int Grow(int number_of_entries) {
+ if (number_of_entries < 5) return number_of_entries + 1;
+ return number_of_entries * 5 / 4;
+ }
static const int kCodesStartIndex = kGroupCount;
};
@@ -5901,23 +5909,15 @@ class Map: public HeapObject {
static void GeneralizeFieldType(Handle<Map> map, int modify_index,
Representation new_representation,
Handle<HeapType> new_field_type);
- static Handle<Map> GeneralizeRepresentation(
- Handle<Map> map,
- int modify_index,
- Representation new_representation,
- Handle<HeapType> new_field_type,
- StoreMode store_mode);
- static Handle<Map> CopyGeneralizeAllRepresentations(
- Handle<Map> map,
- int modify_index,
- StoreMode store_mode,
- PropertyAttributes attributes,
- const char* reason);
+ static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<HeapType> new_field_type,
+ StoreMode store_mode);
static Handle<Map> CopyGeneralizeAllRepresentations(
- Handle<Map> map,
- int modify_index,
- StoreMode store_mode,
- const char* reason);
+ Handle<Map> map, int modify_index, StoreMode store_mode,
+ PropertyKind kind, PropertyAttributes attributes, const char* reason);
static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
int descriptor_number,
@@ -5995,17 +5995,12 @@ class Map: public HeapObject {
// when we change object's prototype to a new one.
// Cache format:
// 0: finger - index of the first free cell in the cache
- // 1: back pointer that overlaps with prototype transitions field.
- // 2 + 2 * i: prototype
- // 3 + 2 * i: target map
+ // 1 + i: target map
inline FixedArray* GetPrototypeTransitions();
inline bool HasPrototypeTransitions();
- static const int kProtoTransitionHeaderSize = 1;
static const int kProtoTransitionNumberOfEntriesOffset = 0;
- static const int kProtoTransitionElementsPerEntry = 2;
- static const int kProtoTransitionPrototypeOffset = 0;
- static const int kProtoTransitionMapOffset = 1;
+ static const int kProtoTransitionHeaderSize = 1;
inline int NumberOfProtoTransitions() {
FixedArray* cache = GetPrototypeTransitions();
@@ -6020,17 +6015,6 @@ class Map: public HeapObject {
cache->set(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value));
}
- // Lookup in the map's instance descriptors and fill out the result
- // with the given holder if the name is found. The holder may be
- // NULL when this function is used from the compiler.
- inline void LookupDescriptor(JSObject* holder,
- Name* name,
- LookupResult* result);
-
- inline void LookupTransition(JSObject* holder, Name* name,
- PropertyAttributes attributes,
- LookupResult* result);
-
inline PropertyDetails GetLastDescriptorDetails();
// The size of transition arrays are limited so they do not end up in large
@@ -6145,11 +6129,18 @@ class Map: public HeapObject {
static Handle<Map> TransitionToAccessorProperty(
Handle<Map> map, Handle<Name> name, AccessorComponent component,
Handle<Object> accessor, PropertyAttributes attributes);
- static Handle<Map> ReconfigureDataProperty(Handle<Map> map, int descriptor,
- PropertyAttributes attributes);
+ static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
+ int descriptor,
+ PropertyKind kind,
+ PropertyAttributes attributes);
inline void AppendDescriptor(Descriptor* desc);
+ // Returns a copy of the map, prepared for inserting into the transition
+ // tree (if the |map| owns descriptors then the new one will share
+ // descriptors with |map|).
+ static Handle<Map> CopyForTransition(Handle<Map> map, const char* reason);
+
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
static Handle<Map> Copy(Handle<Map> map, const char* reason);
@@ -6426,6 +6417,9 @@ class Map: public HeapObject {
Descriptor* descriptor,
int index,
TransitionFlag flag);
+ static MUST_USE_RESULT MaybeHandle<Map> TryReconfigureExistingProperty(
+ Handle<Map> map, int descriptor, PropertyKind kind,
+ PropertyAttributes attributes, const char** reason);
static Handle<Map> CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode);
@@ -6459,6 +6453,8 @@ class Map: public HeapObject {
Representation new_representation,
Handle<HeapType> new_type);
+ void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
+ PropertyAttributes attributes);
void PrintGeneralization(FILE* file,
const char* reason,
int modify_index,
@@ -6594,6 +6590,12 @@ class Script: public Struct {
inline CompilationState compilation_state();
inline void set_compilation_state(CompilationState state);
+ // [is_embedder_debug_script]: An opaque boolean set by the embedder via
+ // ScriptOrigin, and used by the embedder to make decisions about the
+ // script's origin. V8 just passes this through. Encoded in
+ // the 'flags' field.
+ DECL_BOOLEAN_ACCESSORS(is_embedder_debug_script)
+
// [is_shared_cross_origin]: An opaque boolean set by the embedder via
// ScriptOrigin, and used by the embedder to make decisions about the
// script's level of privilege. V8 just passes this through. Encoded in
@@ -6650,7 +6652,8 @@ class Script: public Struct {
// Bit positions in the flags field.
static const int kCompilationTypeBit = 0;
static const int kCompilationStateBit = 1;
- static const int kIsSharedCrossOriginBit = 2;
+ static const int kIsEmbedderDebugScriptBit = 2;
+ static const int kIsSharedCrossOriginBit = 3;
DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
};
@@ -6742,11 +6745,19 @@ class SharedFunctionInfo: public HeapObject {
// Removed a specific optimized code object from the optimized code map.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+ // Unconditionally clear the type feedback vector (including vector ICs).
void ClearTypeFeedbackInfo();
+ // Clear the type feedback vector with a more subtle policy at GC time.
+ void ClearTypeFeedbackInfoAtGCTime();
+
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
+ // Initialize a SharedFunctionInfo from a parsed function literal.
+ static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
+ FunctionLiteral* lit);
+
// Add a new entry to the optimized code map.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
@@ -6778,9 +6789,11 @@ class SharedFunctionInfo: public HeapObject {
inline int length() const;
inline void set_length(int value);
- // [formal parameter count]: The declared number of parameters.
- inline int formal_parameter_count() const;
- inline void set_formal_parameter_count(int value);
+ // [internal formal parameter count]: The declared number of parameters.
+ // For subclass constructors, also includes new.target.
+ // The size of function's frame is internal_formal_parameter_count + 1.
+ inline int internal_formal_parameter_count() const;
+ inline void set_internal_formal_parameter_count(int value);
// Set the formal parameter count so the function code will be
// called without using argument adaptor frames.
@@ -6898,8 +6911,8 @@ class SharedFunctionInfo: public HeapObject {
DECL_BOOLEAN_ACCESSORS(optimization_disabled)
// Indicates the language mode.
- inline StrictMode strict_mode();
- inline void set_strict_mode(StrictMode strict_mode);
+ inline LanguageMode language_mode();
+ inline void set_language_mode(LanguageMode language_mode);
// False if the function definitely does not allocate an arguments object.
DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -6908,9 +6921,6 @@ class SharedFunctionInfo: public HeapObject {
// This is needed to set up the [[HomeObject]] on the function instance.
DECL_BOOLEAN_ACCESSORS(uses_super_property)
- // Indicates that this function uses the super constructor.
- DECL_BOOLEAN_ACCESSORS(uses_super_constructor_call)
-
// True if the function has any duplicated parameter names.
DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
@@ -6955,6 +6965,9 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that this function is a concise method.
DECL_BOOLEAN_ACCESSORS(is_concise_method)
+ // Indicates that this function is an accessor (getter or setter).
+ DECL_BOOLEAN_ACCESSORS(is_accessor_function)
+
// Indicates that this function is a default constructor.
DECL_BOOLEAN_ACCESSORS(is_default_constructor)
@@ -7031,6 +7044,8 @@ class SharedFunctionInfo: public HeapObject {
// Calculate the number of in-object properties.
int CalculateInObjectProperties();
+ inline bool is_simple_parameter_list();
+
// Dispatched behavior.
DECLARE_PRINTER(SharedFunctionInfo)
DECLARE_VERIFIER(SharedFunctionInfo)
@@ -7062,7 +7077,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
static const int kLastPointerFieldOffset = kUniqueIdOffset;
#else
- // Just to not break the postmortem support with conditional offsets
+ // Just to not break the postmortrem support with conditional offsets
static const int kUniqueIdOffset = kFeedbackVectorOffset;
static const int kLastPointerFieldOffset = kFeedbackVectorOffset;
#endif
@@ -7189,9 +7204,9 @@ class SharedFunctionInfo: public HeapObject {
kAllowLazyCompilationWithoutContext,
kOptimizationDisabled,
kStrictModeFunction,
+ kStrongModeFunction,
kUsesArguments,
kUsesSuperProperty,
- kUsesSuperConstructorCall,
kHasDuplicateParameters,
kNative,
kInlineBuiltin,
@@ -7204,13 +7219,18 @@ class SharedFunctionInfo: public HeapObject {
kIsArrow,
kIsGenerator,
kIsConciseMethod,
+ kIsAccessorFunction,
kIsDefaultConstructor,
+ kIsBaseConstructor,
+ kIsSubclassConstructor,
kIsAsmFunction,
kDeserialized,
kCompilerHintsCount // Pseudo entry
};
+ // Add hints for other modes when they're added.
+ STATIC_ASSERT(LANGUAGE_END == 3);
- class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 4> {};
+ class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 7> {};
class DeoptCountBits : public BitField<int, 0, 4> {};
class OptReenableTriesBits : public BitField<int, 4, 18> {};
@@ -7532,6 +7552,11 @@ class JSFunction: public JSObject {
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
+ // Returns `false` if formal parameters include rest parameters, optional
+ // parameters, or destructuring parameters.
+ // TODO(caitp): make this a flag set during parsing
+ inline bool is_simple_parameter_list();
+
// [next_function_link]: Links functions into various lists, e.g. the list
// of optimized functions hanging off the native_context. The CodeFlusher
// uses this link to chain together flushing candidates. Treated weakly
@@ -7642,6 +7667,9 @@ class GlobalObject: public JSObject {
static void InvalidatePropertyCell(Handle<GlobalObject> object,
Handle<Name> name);
+ // Ensure that the global object has a cell for the given property name.
+ static Handle<PropertyCell> EnsurePropertyCell(Handle<GlobalObject> global,
+ Handle<Name> name);
// Layout description.
static const int kBuiltinsOffset = JSObject::kHeaderSize;
@@ -7659,10 +7687,6 @@ class JSGlobalObject: public GlobalObject {
public:
DECLARE_CAST(JSGlobalObject)
- // Ensure that the global object has a cell for the given property name.
- static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
- Handle<Name> name);
-
inline bool IsDetached();
// Dispatched behavior.
@@ -7910,7 +7934,8 @@ class JSRegExp: public JSObject {
GLOBAL = 1,
IGNORE_CASE = 2,
MULTILINE = 4,
- STICKY = 8
+ STICKY = 8,
+ UNICODE_ESCAPES = 16
};
class Flags {
@@ -7920,6 +7945,7 @@ class JSRegExp: public JSObject {
bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
bool is_multiline() { return (value_ & MULTILINE) != 0; }
bool is_sticky() { return (value_ & STICKY) != 0; }
+ bool is_unicode() { return (value_ & UNICODE_ESCAPES) != 0; }
uint32_t value() { return value_; }
private:
uint32_t value_;
@@ -8063,14 +8089,16 @@ class CompilationCacheTable: public HashTable<CompilationCacheTable,
HashTableKey*> {
public:
// Find cached value for a string key, otherwise return null.
- Handle<Object> Lookup(Handle<String> src, Handle<Context> context);
- Handle<Object> LookupEval(Handle<String> src,
- Handle<SharedFunctionInfo> shared,
- StrictMode strict_mode, int scope_position);
+ Handle<Object> Lookup(
+ Handle<String> src, Handle<Context> context, LanguageMode language_mode);
+ Handle<Object> LookupEval(
+ Handle<String> src, Handle<SharedFunctionInfo> shared,
+ LanguageMode language_mode, int scope_position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> Put(
Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> context, Handle<Object> value);
+ Handle<Context> context, LanguageMode language_mode,
+ Handle<Object> value);
static Handle<CompilationCacheTable> PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> context, Handle<SharedFunctionInfo> value,
@@ -8463,7 +8491,7 @@ class AllocationSite: public Struct {
// During mark compact we need to take special care for the dependent code
// field.
static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
- static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+ static const int kPointerFieldsEndOffset = kWeakNextOffset;
// For other visitors, use the fixed body descriptor below.
typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
@@ -8845,6 +8873,22 @@ class String: public Name {
<< ArrayIndexLengthBits::kShift) |
kIsNotArrayIndexMask;
+ class SubStringRange {
+ public:
+ explicit SubStringRange(String* string, int first = 0, int length = -1)
+ : string_(string),
+ first_(first),
+ length_(length == -1 ? string->length() : length) {}
+ class iterator;
+ inline iterator begin();
+ inline iterator end();
+
+ private:
+ String* string_;
+ int first_;
+ int length_;
+ };
+
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
@@ -8879,6 +8923,10 @@ class String: public Name {
return twobyte_start[i];
}
+ bool UsesSameString(const FlatContent& other) const {
+ return onebyte_start == other.onebyte_start;
+ }
+
private:
enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
@@ -8897,6 +8945,7 @@ class String: public Name {
State state_;
friend class String;
+ friend class IterableSubString;
};
template <typename Char>
@@ -8961,10 +9010,6 @@ class String: public Name {
// Requires: StringShape(this).IsIndirect() && this->IsFlat()
inline String* GetUnderlying();
- // Mark the string as an undetectable object. It only applies to
- // one-byte and two-byte string types.
- bool MarkAsUndetectable();
-
// String equality operations.
inline bool Equals(String* other);
inline static bool Equals(Handle<String> one, Handle<String> two);
@@ -9714,7 +9759,7 @@ class PropertyCell: public Cell {
static const int kSize = kDependentCodeOffset + kPointerSize;
static const int kPointerFieldsBeginOffset = kValueOffset;
- static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+ static const int kPointerFieldsEndOffset = kSize;
typedef FixedBodyDescriptor<kValueOffset,
kSize,
@@ -9784,7 +9829,7 @@ class JSProxy: public JSReceiver {
MUST_USE_RESULT
static MaybeHandle<Object> SetPropertyViaPrototypesWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, StrictMode strict_mode, bool* done);
+ Handle<Object> value, LanguageMode language_mode, bool* done);
MUST_USE_RESULT static Maybe<PropertyAttributes>
GetPropertyAttributesWithHandler(Handle<JSProxy> proxy,
@@ -9796,7 +9841,7 @@ class JSProxy: public JSReceiver {
uint32_t index);
MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler(
Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, StrictMode strict_mode);
+ Handle<Object> value, LanguageMode language_mode);
// Turn the proxy into an (empty) JSObject.
static void Fix(Handle<JSProxy> proxy);
@@ -9837,11 +9882,8 @@ class JSProxy: public JSReceiver {
friend class JSReceiver;
MUST_USE_RESULT static inline MaybeHandle<Object> SetElementWithHandler(
- Handle<JSProxy> proxy,
- Handle<JSReceiver> receiver,
- uint32_t index,
- Handle<Object> value,
- StrictMode strict_mode);
+ Handle<JSProxy> proxy, Handle<JSReceiver> receiver, uint32_t index,
+ Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static Maybe<bool> HasPropertyWithHandler(
Handle<JSProxy> proxy, Handle<Name> name);
@@ -9849,13 +9891,9 @@ class JSProxy: public JSReceiver {
Handle<JSProxy> proxy, uint32_t index);
MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithHandler(
- Handle<JSProxy> proxy,
- Handle<Name> name,
- DeleteMode mode);
+ Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> DeleteElementWithHandler(
- Handle<JSProxy> proxy,
- uint32_t index,
- DeleteMode mode);
+ Handle<JSProxy> proxy, uint32_t index, LanguageMode language_mode);
MUST_USE_RESULT Object* GetIdentityHash();
@@ -10286,9 +10324,6 @@ class JSArray: public JSObject {
static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
static MaybeHandle<Object> ReadOnlyLengthError(Handle<JSArray> array);
- // TODO(adamk): Remove this method in favor of HasReadOnlyLength().
- static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
-
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
@@ -10373,9 +10408,9 @@ class AccessorInfo: public Struct {
inline void set_property_attributes(PropertyAttributes attributes);
// Checks whether the given receiver is compatible with this accessor.
- static bool IsCompatibleReceiverType(Isolate* isolate,
- Handle<AccessorInfo> info,
- Handle<HeapType> type);
+ static bool IsCompatibleReceiverMap(Isolate* isolate,
+ Handle<AccessorInfo> info,
+ Handle<Map> map);
inline bool IsCompatibleReceiver(Object* receiver);
DECLARE_CAST(AccessorInfo)
@@ -10407,115 +10442,6 @@ class AccessorInfo: public Struct {
};
-enum AccessorDescriptorType {
- kDescriptorBitmaskCompare,
- kDescriptorPointerCompare,
- kDescriptorPrimitiveValue,
- kDescriptorObjectDereference,
- kDescriptorPointerDereference,
- kDescriptorPointerShift,
- kDescriptorReturnObject
-};
-
-
-struct BitmaskCompareDescriptor {
- uint32_t bitmask;
- uint32_t compare_value;
- uint8_t size; // Must be in {1,2,4}.
-};
-
-
-struct PointerCompareDescriptor {
- void* compare_value;
-};
-
-
-struct PrimitiveValueDescriptor {
- v8::DeclaredAccessorDescriptorDataType data_type;
- uint8_t bool_offset; // Must be in [0,7], used for kDescriptorBoolType.
-};
-
-
-struct ObjectDerefenceDescriptor {
- uint8_t internal_field;
-};
-
-
-struct PointerShiftDescriptor {
- int16_t byte_offset;
-};
-
-
-struct DeclaredAccessorDescriptorData {
- AccessorDescriptorType type;
- union {
- struct BitmaskCompareDescriptor bitmask_compare_descriptor;
- struct PointerCompareDescriptor pointer_compare_descriptor;
- struct PrimitiveValueDescriptor primitive_value_descriptor;
- struct ObjectDerefenceDescriptor object_dereference_descriptor;
- struct PointerShiftDescriptor pointer_shift_descriptor;
- };
-};
-
-
-class DeclaredAccessorDescriptor;
-
-
-class DeclaredAccessorDescriptorIterator {
- public:
- explicit DeclaredAccessorDescriptorIterator(
- DeclaredAccessorDescriptor* descriptor);
- const DeclaredAccessorDescriptorData* Next();
- bool Complete() const { return length_ == offset_; }
- private:
- uint8_t* array_;
- const int length_;
- int offset_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptorIterator);
-};
-
-
-class DeclaredAccessorDescriptor: public Struct {
- public:
- DECL_ACCESSORS(serialized_data, ByteArray)
-
- DECLARE_CAST(DeclaredAccessorDescriptor)
-
- static Handle<DeclaredAccessorDescriptor> Create(
- Isolate* isolate,
- const DeclaredAccessorDescriptorData& data,
- Handle<DeclaredAccessorDescriptor> previous);
-
- // Dispatched behavior.
- DECLARE_PRINTER(DeclaredAccessorDescriptor)
- DECLARE_VERIFIER(DeclaredAccessorDescriptor)
-
- static const int kSerializedDataOffset = HeapObject::kHeaderSize;
- static const int kSize = kSerializedDataOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptor);
-};
-
-
-class DeclaredAccessorInfo: public AccessorInfo {
- public:
- DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor)
-
- DECLARE_CAST(DeclaredAccessorInfo)
-
- // Dispatched behavior.
- DECLARE_PRINTER(DeclaredAccessorInfo)
- DECLARE_VERIFIER(DeclaredAccessorInfo)
-
- static const int kDescriptorOffset = AccessorInfo::kSize;
- static const int kSize = kDescriptorOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorInfo);
-};
-
-
// An accessor must have a getter, but can have no setter.
//
// When setting a property, V8 searches accessors in prototypes.
@@ -10585,6 +10511,14 @@ class AccessorPair: public Struct {
if (!setter->IsNull()) set_setter(setter);
}
+ bool Equals(AccessorPair* pair) {
+ return (this == pair) || pair->Equals(getter(), setter());
+ }
+
+ bool Equals(Object* getter_value, Object* setter_value) {
+ return (getter() == getter_value) && (setter() == setter_value);
+ }
+
bool ContainsAccessor() {
return IsJSAccessor(getter()) || IsJSAccessor(setter());
}
@@ -10642,6 +10576,7 @@ class InterceptorInfo: public Struct {
DECL_ACCESSORS(enumerator, Object)
DECL_ACCESSORS(data, Object)
DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
+ DECL_BOOLEAN_ACCESSORS(all_can_read)
inline int flags() const;
inline void set_flags(int flags);
@@ -10662,6 +10597,7 @@ class InterceptorInfo: public Struct {
static const int kSize = kFlagsOffset + kPointerSize;
static const int kCanInterceptSymbolsBit = 0;
+ static const int kAllCanReadBit = 1;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
@@ -10734,6 +10670,7 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_BOOLEAN_ACCESSORS(read_only_prototype)
DECL_BOOLEAN_ACCESSORS(remove_prototype)
DECL_BOOLEAN_ACCESSORS(do_not_cache)
+ DECL_BOOLEAN_ACCESSORS(instantiated)
DECLARE_CAST(FunctionTemplateInfo)
@@ -10766,6 +10703,10 @@ class FunctionTemplateInfo: public TemplateInfo {
bool IsTemplateFor(Object* object);
bool IsTemplateFor(Map* map);
+ // Returns the holder JSObject if the function can legally be called with this
+ // receiver. Returns Heap::null_value() if the call is illegal.
+ Object* GetCompatibleReceiver(Isolate* isolate, Object* receiver);
+
private:
// Bit position in the flag, from least significant bit position.
static const int kHiddenPrototypeBit = 0;
@@ -10774,6 +10715,7 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kReadOnlyPrototypeBit = 3;
static const int kRemovePrototypeBit = 4;
static const int kDoNotCacheBit = 5;
+ static const int kInstantiatedBit = 6;
DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
};
@@ -10797,26 +10739,6 @@ class ObjectTemplateInfo: public TemplateInfo {
};
-class SignatureInfo: public Struct {
- public:
- DECL_ACCESSORS(receiver, Object)
- DECL_ACCESSORS(args, Object)
-
- DECLARE_CAST(SignatureInfo)
-
- // Dispatched behavior.
- DECLARE_PRINTER(SignatureInfo)
- DECLARE_VERIFIER(SignatureInfo)
-
- static const int kReceiverOffset = Struct::kHeaderSize;
- static const int kArgsOffset = kReceiverOffset + kPointerSize;
- static const int kSize = kArgsOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SignatureInfo);
-};
-
-
class TypeSwitchInfo: public Struct {
public:
DECL_ACCESSORS(types, Object)
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 6926f47ef2..5999df9d6d 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -53,25 +53,28 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
-
OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
- if (thread->recompilation_delay_ != 0) {
- base::OS::Sleep(thread->recompilation_delay_);
- }
+ {
+ TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
+
+ if (thread->recompilation_delay_ != 0) {
+ base::OS::Sleep(thread->recompilation_delay_);
+ }
- StopFlag flag;
- OptimizedCompileJob* job = thread->NextInput(&flag);
+ StopFlag flag;
+ OptimizedCompileJob* job = thread->NextInput(&flag);
- if (flag == CONTINUE) {
- thread->CompileNext(job);
- } else {
- AllowHandleDereference allow_handle_dereference;
- if (!job->info()->is_osr()) {
- DisposeOptimizedCompileJob(job, true);
+ if (flag == CONTINUE) {
+ thread->CompileNext(job);
+ } else {
+ AllowHandleDereference allow_handle_dereference;
+ if (!job->info()->is_osr()) {
+ DisposeOptimizedCompileJob(job, true);
+ }
}
}
+
bool signal = false;
{
base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_);
@@ -99,7 +102,7 @@ OptimizingCompilerThread::~OptimizingCompilerThread() {
if (FLAG_concurrent_osr) {
#ifdef DEBUG
for (int i = 0; i < osr_buffer_capacity_; i++) {
- CHECK_EQ(NULL, osr_buffer_[i]);
+ CHECK_NULL(osr_buffer_[i]);
}
#endif
DeleteArray(osr_buffer_);
@@ -175,7 +178,7 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
return NULL;
}
OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
- DCHECK_NE(NULL, job);
+ DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
if (flag) {
@@ -186,7 +189,7 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
- DCHECK_NE(NULL, job);
+ DCHECK_NOT_NULL(job);
// The function may have already been optimized by OSR. Simply continue.
OptimizedCompileJob::Status status = job->OptimizeGraph();
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index ee0474d2c0..b11332dafc 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -17,7 +17,7 @@ OFStreamBase::OFStreamBase(FILE* f) : f_(f) {}
OFStreamBase::~OFStreamBase() {}
-OFStreamBase::int_type OFStreamBase::sync() {
+int OFStreamBase::sync() {
std::fflush(f_);
return 0;
}
@@ -28,8 +28,15 @@ OFStreamBase::int_type OFStreamBase::overflow(int_type c) {
}
-OFStream::OFStream(FILE* f) : OFStreamBase(f), std::ostream(this) {
+std::streamsize OFStreamBase::xsputn(const char* s, std::streamsize n) {
+ return static_cast<std::streamsize>(
+ std::fwrite(s, 1, static_cast<size_t>(n), f_));
+}
+
+
+OFStream::OFStream(FILE* f) : std::ostream(nullptr), buf_(f) {
DCHECK_NOT_NULL(f);
+ rdbuf(&buf_);
}
@@ -59,6 +66,14 @@ std::ostream& operator<<(std::ostream& os, const AsReversiblyEscapedUC16& c) {
}
+std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c) {
+ if (c.value == '\n') return os << "\\n";
+ if (c.value == '\r') return os << "\\r";
+ if (c.value == '\"') return os << "\\\"";
+ return PrintUC16(os, c.value, IsOK);
+}
+
+
std::ostream& operator<<(std::ostream& os, const AsUC16& c) {
return PrintUC16(os, c.value, IsPrint);
}
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index 56787f7c12..6f8600e7b1 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -17,29 +17,29 @@
namespace v8 {
namespace internal {
+
class OFStreamBase : public std::streambuf {
- protected:
+ public:
explicit OFStreamBase(FILE* f);
virtual ~OFStreamBase();
- int_type sync() FINAL;
- int_type overflow(int_type c) FINAL;
-
- private:
+ protected:
FILE* const f_;
- DISALLOW_COPY_AND_ASSIGN(OFStreamBase);
+ virtual int sync();
+ virtual int_type overflow(int_type c);
+ virtual std::streamsize xsputn(const char* s, std::streamsize n);
};
// An output stream writing to a file.
-class OFStream FINAL : private virtual OFStreamBase, public std::ostream {
+class OFStream : public std::ostream {
public:
explicit OFStream(FILE* f);
~OFStream();
private:
- DISALLOW_COPY_AND_ASSIGN(OFStream);
+ OFStreamBase buf_;
};
@@ -55,12 +55,20 @@ struct AsReversiblyEscapedUC16 {
uint16_t value;
};
+struct AsEscapedUC16ForJSON {
+ explicit AsEscapedUC16ForJSON(uint16_t v) : value(v) {}
+ uint16_t value;
+};
+
// Writes the given character to the output escaping everything outside of
// printable/space ASCII range. Additionally escapes '\' making escaping
// reversible.
std::ostream& operator<<(std::ostream& os, const AsReversiblyEscapedUC16& c);
+// Same as AsReversiblyEscapedUC16 with additional escaping of \n, \r, " and '.
+std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c);
+
// Writes the given character to the output escaping everything outside
// of printable ASCII range.
std::ostream& operator<<(std::ostream& os, const AsUC16& c);
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index bfdeaa3276..985a90f8dc 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -251,27 +251,18 @@ int ParseData::FunctionsSize() {
}
-void Parser::SetCachedData() {
- if (compile_options() == ScriptCompiler::kNoCompileOptions) {
+void Parser::SetCachedData(CompilationInfo* info) {
+ if (compile_options_ == ScriptCompiler::kNoCompileOptions) {
cached_parse_data_ = NULL;
} else {
- DCHECK(info_->cached_data() != NULL);
- if (compile_options() == ScriptCompiler::kConsumeParserCache) {
- cached_parse_data_ = ParseData::FromCachedData(*info_->cached_data());
+ DCHECK(info->cached_data() != NULL);
+ if (compile_options_ == ScriptCompiler::kConsumeParserCache) {
+ cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
}
}
}
-Scope* Parser::NewScope(Scope* parent, ScopeType scope_type) {
- DCHECK(ast_value_factory());
- Scope* result =
- new (zone()) Scope(parent, scope_type, ast_value_factory(), zone());
- result->Initialize();
- return result;
-}
-
-
FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
int pos, int end_pos) {
int materialized_literal_count = -1;
@@ -280,8 +271,12 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
int parameter_count = 0;
const AstRawString* name = ast_value_factory()->empty_string();
- Scope* function_scope = NewScope(scope, FUNCTION_SCOPE);
- function_scope->SetStrictMode(STRICT);
+
+ FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
+ : FunctionKind::kDefaultBaseConstructor;
+ Scope* function_scope = NewScope(scope, FUNCTION_SCOPE, kind);
+ function_scope->SetLanguageMode(
+ static_cast<LanguageMode>(scope->language_mode() | STRICT_BIT));
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
@@ -290,18 +285,18 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
{
AstNodeFactory function_factory(ast_value_factory());
FunctionState function_state(&function_state_, &scope_, function_scope,
- &function_factory);
+ kind, &function_factory);
- body = new (zone()) ZoneList<Statement*>(1, zone());
+ body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
+ AddAssertIsConstruct(body, pos);
if (call_super) {
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(0, zone());
CallRuntime* call = factory()->NewCallRuntime(
ast_value_factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kDefaultConstructorSuperCall), args,
- pos);
- body->Add(factory()->NewExpressionStatement(call, pos), zone());
- function_scope->RecordSuperConstructorCallUsage();
+ Runtime::FunctionForId(Runtime::kInlineDefaultConstructorCallSuper),
+ args, pos);
+ body->Add(factory()->NewReturnStatement(call, pos), zone());
}
materialized_literal_count = function_state.materialized_literal_count();
@@ -314,8 +309,7 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
materialized_literal_count, expected_property_count, handler_count,
parameter_count, FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
- FunctionLiteral::kNotParenthesized, FunctionKind::kDefaultConstructor,
- pos);
+ FunctionLiteral::kNotParenthesized, kind, pos);
return function_literal;
}
@@ -329,8 +323,8 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
class Target BASE_EMBEDDED {
public:
- Target(Target** variable, AstNode* node)
- : variable_(variable), node_(node), previous_(*variable) {
+ Target(Target** variable, BreakableStatement* statement)
+ : variable_(variable), statement_(statement), previous_(*variable) {
*variable = this;
}
@@ -339,11 +333,11 @@ class Target BASE_EMBEDDED {
}
Target* previous() { return previous_; }
- AstNode* node() { return node_; }
+ BreakableStatement* statement() { return statement_; }
private:
Target** variable_;
- AstNode* node_;
+ BreakableStatement* statement_;
Target* previous_;
};
@@ -388,9 +382,18 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// Implementation of Parser
+bool ParserTraits::IsEval(const AstRawString* identifier) const {
+ return identifier == parser_->ast_value_factory()->eval_string();
+}
+
+
+bool ParserTraits::IsArguments(const AstRawString* identifier) const {
+ return identifier == parser_->ast_value_factory()->arguments_string();
+}
+
+
bool ParserTraits::IsEvalOrArguments(const AstRawString* identifier) const {
- return identifier == parser_->ast_value_factory()->eval_string() ||
- identifier == parser_->ast_value_factory()->arguments_string();
+ return IsEval(identifier) || IsArguments(identifier);
}
@@ -407,9 +410,8 @@ bool ParserTraits::IsConstructor(const AstRawString* identifier) const {
bool ParserTraits::IsThisProperty(Expression* expression) {
DCHECK(expression != NULL);
Property* property = expression->AsProperty();
- return property != NULL &&
- property->obj()->AsVariableProxy() != NULL &&
- property->obj()->AsVariableProxy()->is_this();
+ return property != NULL && property->obj()->IsVariableProxy() &&
+ property->obj()->AsVariableProxy()->is_this();
}
@@ -433,8 +435,7 @@ void ParserTraits::PushPropertyName(FuncNameInferrer* fni,
void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
Expression* right) {
DCHECK(left != NULL);
- if (left->AsProperty() != NULL &&
- right->AsFunctionLiteral() != NULL) {
+ if (left->IsProperty() && right->IsFunctionLiteral()) {
right->AsFunctionLiteral()->set_pretenure();
}
}
@@ -665,7 +666,7 @@ const AstRawString* ParserTraits::GetNumberAsSymbol(Scanner* scanner) {
char array[100];
const char* string =
DoubleToCString(double_value, Vector<char>(array, arraysize(array)));
- return ast_value_factory()->GetOneByteString(string);
+ return parser_->ast_value_factory()->GetOneByteString(string);
}
@@ -718,13 +719,14 @@ Expression* ParserTraits::ExpressionFromIdentifier(const AstRawString* name,
int pos, Scope* scope,
AstNodeFactory* factory) {
if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
- // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Variable %.*s ", name->length(), name->raw_data());
-#endif
- Interface* interface = Interface::NewUnknown(parser_->zone());
- return scope->NewUnresolved(factory, name, interface, pos);
+
+ // Arrow function parameters are parsed as an expression. When
+ // parsing lazily, it is enough to create a VariableProxy in order
+ // for Traits::DeclareArrowParametersFromExpression() to be able to
+ // pick the names of the parameters.
+ return parser_->parsing_lazy_arrow_parameters_
+ ? factory->NewVariableProxy(name, false, pos)
+ : scope->NewUnresolved(factory, name, pos);
}
@@ -779,22 +781,29 @@ ClassLiteral* ParserTraits::ParseClassLiteral(
}
-Parser::Parser(CompilationInfo* info, ParseInfo* parse_info)
- : ParserBase<ParserTraits>(&scanner_, parse_info->stack_limit,
- info->extension(), NULL, info->zone(), this),
- scanner_(parse_info->unicode_cache),
+Parser::Parser(CompilationInfo* info, uintptr_t stack_limit, uint32_t hash_seed,
+ UnicodeCache* unicode_cache)
+ : ParserBase<ParserTraits>(info->zone(), &scanner_, stack_limit,
+ info->extension(), info->ast_value_factory(),
+ NULL, this),
+ scanner_(unicode_cache),
reusable_preparser_(NULL),
original_scope_(NULL),
target_stack_(NULL),
+ compile_options_(info->compile_options()),
cached_parse_data_(NULL),
- info_(info),
+ parsing_lazy_arrow_parameters_(false),
has_pending_error_(false),
pending_error_message_(NULL),
pending_error_arg_(NULL),
pending_error_char_arg_(NULL),
total_preparse_skipped_(0),
- pre_parse_timer_(NULL) {
- DCHECK(!script().is_null() || info->source_stream() != NULL);
+ pre_parse_timer_(NULL),
+ parsing_on_main_thread_(true) {
+ // Even though we were passed CompilationInfo, we should not store it in
+ // Parser - this makes sure that Isolate is not accidentally accessed via
+ // CompilationInfo during background parsing.
+ DCHECK(!info->script().is_null() || info->source_stream() != NULL);
set_allow_lazy(false); // Must be explicitly enabled.
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
@@ -806,27 +815,34 @@ Parser::Parser(CompilationInfo* info, ParseInfo* parse_info)
set_allow_harmony_templates(FLAG_harmony_templates);
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
set_allow_harmony_unicode(FLAG_harmony_unicode);
+ set_allow_harmony_computed_property_names(
+ FLAG_harmony_computed_property_names);
+ set_allow_harmony_rest_params(FLAG_harmony_rest_parameters);
+ set_allow_strong_mode(FLAG_strong_mode);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
}
if (info->ast_value_factory() == NULL) {
// info takes ownership of AstValueFactory.
- info->SetAstValueFactory(
- new AstValueFactory(zone(), parse_info->hash_seed));
+ info->SetAstValueFactory(new AstValueFactory(zone(), hash_seed));
+ ast_value_factory_ = info->ast_value_factory();
}
}
-FunctionLiteral* Parser::ParseProgram() {
+FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
// TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
// see comment for HistogramTimerScope class.
- // It's OK to use the counters here, since this function is only called in
- // the main thread.
- HistogramTimerScope timer_scope(isolate()->counters()->parse(), true);
- Handle<String> source(String::cast(script()->source()));
- isolate()->counters()->total_parse_size()->Increment(source->length());
+ // It's OK to use the Isolate & counters here, since this function is only
+ // called in the main thread.
+ DCHECK(parsing_on_main_thread_);
+
+ Isolate* isolate = info->isolate();
+ HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
+ Handle<String> source(String::cast(info->script()->source()));
+ isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
if (FLAG_trace_parse) {
timer.Start();
@@ -854,24 +870,24 @@ FunctionLiteral* Parser::ParseProgram() {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), &top_scope, &eval_scope);
+ result = DoParseProgram(info, &top_scope, &eval_scope);
} else {
GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- result = DoParseProgram(info(), &top_scope, &eval_scope);
+ result = DoParseProgram(info, &top_scope, &eval_scope);
}
top_scope->set_end_position(source->length());
if (eval_scope != NULL) {
eval_scope->set_end_position(source->length());
}
- HandleSourceURLComments();
+ HandleSourceURLComments(info);
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
- if (info()->is_eval()) {
+ if (info->is_eval()) {
PrintF("[parsing eval");
- } else if (info()->script()->name()->IsString()) {
- String* name = String::cast(info()->script()->name());
+ } else if (info->script()->name()->IsString()) {
+ String* name = String::cast(info->script()->name());
SmartArrayPointer<char> name_chars = name->ToCString();
PrintF("[parsing script: %s", name_chars.get());
} else {
@@ -880,7 +896,7 @@ FunctionLiteral* Parser::ParseProgram() {
PrintF(" - took %0.3f ms]\n", ms);
}
if (produce_cached_parse_data()) {
- if (result != NULL) *info_->cached_data() = recorder.GetScriptData();
+ if (result != NULL) *info->cached_data() = recorder.GetScriptData();
log_ = NULL;
}
return result;
@@ -889,6 +905,9 @@ FunctionLiteral* Parser::ParseProgram() {
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
Scope** eval_scope) {
+ // Note that this function can be called from the main thread or from a
+ // background thread. We should not access anything Isolate / heap dependent
+ // via CompilationInfo, and also not pass it forward.
DCHECK(scope_ == NULL);
DCHECK(target_stack_ == NULL);
@@ -897,16 +916,18 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
*scope = NewScope(scope_, SCRIPT_SCOPE);
info->SetScriptScope(*scope);
if (!info->context().is_null() && !info->context()->IsNativeContext()) {
- *scope = Scope::DeserializeScopeChain(*info->context(), *scope, zone());
+ *scope = Scope::DeserializeScopeChain(info->isolate(), zone(),
+ *info->context(), *scope);
// The Scope is backed up by ScopeInfo (which is in the V8 heap); this
// means the Parser cannot operate independent of the V8 heap. Tell the
// string table to internalize strings and values right after they're
- // created.
- ast_value_factory()->Internalize(isolate());
+ // created. This kind of parsing can only be done in the main thread.
+ DCHECK(parsing_on_main_thread_);
+ ast_value_factory()->Internalize(info->isolate());
}
original_scope_ = *scope;
if (info->is_eval()) {
- if (!(*scope)->is_script_scope() || info->strict_mode() == STRICT) {
+ if (!(*scope)->is_script_scope() || is_strict(info->language_mode())) {
*scope = NewScope(*scope, EVAL_SCOPE);
}
} else if (info->is_global()) {
@@ -926,20 +947,27 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
// Enters 'scope'.
AstNodeFactory function_factory(ast_value_factory());
FunctionState function_state(&function_state_, &scope_, *scope,
- &function_factory);
+ kNormalFunction, &function_factory);
- scope_->SetStrictMode(info->strict_mode());
+ scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
- ParseSourceElements(body, Token::EOS, info->is_eval(), true, eval_scope,
- &ok);
+ if (info->is_module()) {
+ DCHECK(allow_harmony_modules());
+ Statement* stmt = ParseModule(&ok);
+ if (ok) {
+ body->Add(stmt, zone());
+ }
+ } else {
+ ParseStatementList(body, Token::EOS, info->is_eval(), eval_scope, &ok);
+ }
- if (ok && strict_mode() == STRICT) {
+ if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
}
- if (ok && allow_harmony_scoping() && strict_mode() == STRICT) {
+ if (ok && allow_harmony_scoping() && is_strict(language_mode())) {
CheckConflictingVarDeclarations(scope_, &ok);
}
@@ -972,17 +1000,18 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
}
-FunctionLiteral* Parser::ParseLazy() {
- // It's OK to use the counters here, since this function is only called in
- // the main thread.
- HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
- Handle<String> source(String::cast(script()->source()));
- isolate()->counters()->total_parse_size()->Increment(source->length());
+FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
+ // It's OK to use the Isolate & counters here, since this function is only
+ // called in the main thread.
+ DCHECK(parsing_on_main_thread_);
+ HistogramTimerScope timer_scope(info->isolate()->counters()->parse_lazy());
+ Handle<String> source(String::cast(info->script()->source()));
+ info->isolate()->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
if (FLAG_trace_parse) {
timer.Start();
}
- Handle<SharedFunctionInfo> shared_info = info()->shared_info();
+ Handle<SharedFunctionInfo> shared_info = info->shared_info();
// Initialize parser state.
source = String::Flatten(source);
@@ -992,12 +1021,12 @@ FunctionLiteral* Parser::ParseLazy() {
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(&stream);
+ result = ParseLazy(info, &stream);
} else {
GenericStringUtf16CharacterStream stream(source,
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(&stream);
+ result = ParseLazy(info, &stream);
}
if (FLAG_trace_parse && result != NULL) {
@@ -1009,8 +1038,9 @@ FunctionLiteral* Parser::ParseLazy() {
}
-FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
- Handle<SharedFunctionInfo> shared_info = info()->shared_info();
+FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
+ Utf16CharacterStream* source) {
+ Handle<SharedFunctionInfo> shared_info = info->shared_info();
scanner_.Initialize(source);
DCHECK(scope_ == NULL);
DCHECK(target_stack_ == NULL);
@@ -1029,18 +1059,22 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
{
// Parse the function literal.
Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
- info()->SetScriptScope(scope);
- if (!info()->closure().is_null()) {
- scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
- zone());
+ info->SetScriptScope(scope);
+ if (!info->closure().is_null()) {
+ // Ok to use Isolate here, since lazy function parsing is only done in the
+ // main thread.
+ DCHECK(parsing_on_main_thread_);
+ scope = Scope::DeserializeScopeChain(info->isolate(), zone(),
+ info->closure()->context(), scope);
}
original_scope_ = scope;
AstNodeFactory function_factory(ast_value_factory());
FunctionState function_state(&function_state_, &scope_, scope,
- &function_factory);
- DCHECK(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
- DCHECK(info()->strict_mode() == shared_info->strict_mode());
- scope->SetStrictMode(shared_info->strict_mode());
+ shared_info->kind(), &function_factory);
+ DCHECK(is_sloppy(scope->language_mode()) ||
+ is_strict(info->language_mode()));
+ DCHECK(info->language_mode() == shared_info->language_mode());
+ scope->SetLanguageMode(shared_info->language_mode());
FunctionLiteral::FunctionType function_type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -1049,11 +1083,15 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
bool ok = true;
if (shared_info->is_arrow()) {
+ // The first expression being parsed is the parameter list of the arrow
+ // function. Setting this avoids prevents ExpressionFromIdentifier()
+ // from creating unresolved variables in already-resolved scopes.
+ parsing_lazy_arrow_parameters_ = true;
Expression* expression = ParseExpression(false, &ok);
DCHECK(expression->IsFunctionLiteral());
result = expression->AsFunctionLiteral();
} else if (shared_info->is_default_constructor()) {
- result = DefaultConstructor(shared_info->uses_super_constructor_call(),
+ result = DefaultConstructor(IsSubclassConstructor(shared_info->kind()),
scope, shared_info->start_position(),
shared_info->end_position());
} else {
@@ -1078,11 +1116,10 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
}
-void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token, bool is_eval, bool is_global,
- Scope** eval_scope, bool* ok) {
- // SourceElements ::
- // (ModuleElement)* <end_token>
+void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
+ bool is_eval, Scope** eval_scope, bool* ok) {
+ // StatementList ::
+ // (StatementListItem)* <end_token>
// Allocate a target stack to use for this set of source
// elements. This way, all scripts and functions get their own
@@ -1090,7 +1127,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// functions.
TargetScope scope(&this->target_stack_);
- DCHECK(processor != NULL);
+ DCHECK(body != NULL);
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1099,12 +1136,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
Scanner::Location token_loc = scanner()->peek_location();
- Statement* stat;
- if (is_global && !is_eval) {
- stat = ParseModuleElement(NULL, CHECK_OK);
- } else {
- stat = ParseBlockElement(NULL, CHECK_OK);
- }
+ Statement* stat = ParseStatementListItem(CHECK_OK);
if (stat == NULL || stat->IsEmpty()) {
directive_prologue = false; // End of directive prologue.
continue;
@@ -1118,33 +1150,48 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
if ((e_stat = stat->AsExpressionStatement()) != NULL &&
(literal = e_stat->expression()->AsLiteral()) != NULL &&
literal->raw_value()->IsString()) {
- // Check "use strict" directive (ES5 14.1) and "use asm" directive. Only
- // one can be present.
- if (strict_mode() == SLOPPY &&
+ // Check "use strict" directive (ES5 14.1), "use asm" directive, and
+ // "use strong" directive (experimental).
+ bool use_strict_found =
literal->raw_value()->AsString() ==
ast_value_factory()->use_strict_string() &&
token_loc.end_pos - token_loc.beg_pos ==
- ast_value_factory()->use_strict_string()->length() + 2) {
- // TODO(mstarzinger): Global strict eval calls, need their own scope
- // as specified in ES5 10.4.2(3). The correct fix would be to always
- // add this scope in DoParseProgram(), but that requires adaptations
- // all over the code base, so we go with a quick-fix for now.
- // In the same manner, we have to patch the parsing mode.
- if (is_eval && !scope_->is_eval_scope()) {
- DCHECK(scope_->is_script_scope());
- Scope* scope = NewScope(scope_, EVAL_SCOPE);
- scope->set_start_position(scope_->start_position());
- scope->set_end_position(scope_->end_position());
- scope_ = scope;
- if (eval_scope != NULL) {
- // Caller will correct the positions of the ad hoc eval scope.
- *eval_scope = scope;
+ ast_value_factory()->use_strict_string()->length() + 2;
+ bool use_strong_found =
+ allow_strong_mode() &&
+ literal->raw_value()->AsString() ==
+ ast_value_factory()->use_strong_string() &&
+ token_loc.end_pos - token_loc.beg_pos ==
+ ast_value_factory()->use_strong_string()->length() + 2;
+ if (use_strict_found || use_strong_found) {
+ // Strong mode implies strict mode. If there are several "use strict"
+ // / "use strong" directives, do the strict mode changes only once.
+ if (is_sloppy(scope_->language_mode())) {
+ // TODO(mstarzinger): Global strict eval calls, need their own scope
+ // as specified in ES5 10.4.2(3). The correct fix would be to always
+ // add this scope in DoParseProgram(), but that requires adaptations
+ // all over the code base, so we go with a quick-fix for now.
+ // In the same manner, we have to patch the parsing mode.
+ if (is_eval && !scope_->is_eval_scope()) {
+ DCHECK(scope_->is_script_scope());
+ Scope* scope = NewScope(scope_, EVAL_SCOPE);
+ scope->set_start_position(scope_->start_position());
+ scope->set_end_position(scope_->end_position());
+ scope_ = scope;
+ if (eval_scope != NULL) {
+ // Caller will correct the positions of the ad hoc eval scope.
+ *eval_scope = scope;
+ }
+ mode_ = PARSE_EAGERLY;
}
- mode_ = PARSE_EAGERLY;
+ scope_->SetLanguageMode(static_cast<LanguageMode>(
+ scope_->language_mode() | STRICT_BIT));
+ }
+
+ if (use_strong_found) {
+ scope_->SetLanguageMode(static_cast<LanguageMode>(
+ scope_->language_mode() | STRONG_BIT));
}
- scope_->SetStrictMode(STRICT);
- // "use strict" is the only directive for now.
- directive_prologue = false;
} else if (literal->raw_value()->AsString() ==
ast_value_factory()->use_asm_string() &&
token_loc.end_pos - token_loc.beg_pos ==
@@ -1160,169 +1207,89 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
}
- processor->Add(stat, zone());
+ body->Add(stat, zone());
}
return 0;
}
-Statement* Parser::ParseModuleElement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
+Statement* Parser::ParseStatementListItem(bool* ok) {
+ // (Ecma 262 6th Edition, 13.1):
+ // StatementListItem:
// Statement
- // FunctionDeclaration
- //
- // In harmony mode we allow additionally the following productions
- // ModuleElement:
- // LetDeclaration
- // ConstDeclaration
- // ModuleDeclaration
- // ImportDeclaration
- // ExportDeclaration
- // GeneratorDeclaration
+ // Declaration
switch (peek()) {
case Token::FUNCTION:
return ParseFunctionDeclaration(NULL, ok);
case Token::CLASS:
return ParseClassDeclaration(NULL, ok);
- case Token::IMPORT:
- return ParseImportDeclaration(ok);
- case Token::EXPORT:
- return ParseExportDeclaration(ok);
case Token::CONST:
- return ParseVariableStatement(kModuleElement, NULL, ok);
+ case Token::VAR:
+ return ParseVariableStatement(kStatementListItem, NULL, ok);
case Token::LET:
DCHECK(allow_harmony_scoping());
- if (strict_mode() == STRICT) {
- return ParseVariableStatement(kModuleElement, NULL, ok);
+ if (is_strict(language_mode())) {
+ return ParseVariableStatement(kStatementListItem, NULL, ok);
}
// Fall through.
- default: {
- Statement* stmt = ParseStatement(labels, CHECK_OK);
- // Handle 'module' as a context-sensitive keyword.
- if (FLAG_harmony_modules &&
- peek() == Token::IDENTIFIER &&
- !scanner()->HasAnyLineTerminatorBeforeNext() &&
- stmt != NULL) {
- ExpressionStatement* estmt = stmt->AsExpressionStatement();
- if (estmt != NULL && estmt->expression()->AsVariableProxy() != NULL &&
- estmt->expression()->AsVariableProxy()->raw_name() ==
- ast_value_factory()->module_string() &&
- !scanner()->literal_contains_escapes()) {
- return ParseModuleDeclaration(NULL, ok);
- }
- }
- return stmt;
- }
- }
-}
-
-
-Statement* Parser::ParseModuleDeclaration(ZoneList<const AstRawString*>* names,
- bool* ok) {
- // ModuleDeclaration:
- // 'module' Identifier Module
-
- int pos = peek_position();
- const AstRawString* name =
- ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
-
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Module %.*s ", name->length(), name->raw_data());
-#endif
-
- Module* module = ParseModule(CHECK_OK);
- VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
- Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, scope_, pos);
- Declare(declaration, true, CHECK_OK);
-
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Module %.*s ", name->length(), name->raw_data());
- if (FLAG_print_interfaces) {
- PrintF("module %.*s: ", name->length(), name->raw_data());
- module->interface()->Print();
+ default:
+ return ParseStatement(NULL, ok);
}
-#endif
-
- if (names) names->Add(name, zone());
- if (module->body() == NULL)
- return factory()->NewEmptyStatement(pos);
- else
- return factory()->NewModuleStatement(proxy, module->body(), pos);
}
-Module* Parser::ParseModule(bool* ok) {
- // Module:
- // '{' ModuleElement '}'
- // '=' ModulePath ';'
- // 'at' String ';'
+Statement* Parser::ParseModuleItem(bool* ok) {
+ // (Ecma 262 6th Edition, 15.2):
+ // ModuleItem :
+ // ImportDeclaration
+ // ExportDeclaration
+ // StatementListItem
switch (peek()) {
- case Token::LBRACE:
- return ParseModuleLiteral(ok);
-
- case Token::ASSIGN: {
- Expect(Token::ASSIGN, CHECK_OK);
- Module* result = ParseModulePath(CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
- }
-
- default: {
- ExpectContextualKeyword(CStrVector("at"), CHECK_OK);
- Module* result = ParseModuleUrl(CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return result;
- }
+ case Token::IMPORT:
+ return ParseImportDeclaration(ok);
+ case Token::EXPORT:
+ return ParseExportDeclaration(ok);
+ default:
+ return ParseStatementListItem(ok);
}
}
-Module* Parser::ParseModuleLiteral(bool* ok) {
- // Module:
- // '{' ModuleElement '}'
+Statement* Parser::ParseModule(bool* ok) {
+ // (Ecma 262 6th Edition, 15.2):
+ // Module :
+ // ModuleBody?
+ //
+ // ModuleBody :
+ // ModuleItem*
- int pos = peek_position();
- // Construct block expecting 16 statements.
Block* body = factory()->NewBlock(NULL, 16, false, RelocInfo::kNoPosition);
-#ifdef DEBUG
- if (FLAG_print_interface_details) PrintF("# Literal ");
-#endif
Scope* scope = NewScope(scope_, MODULE_SCOPE);
-
- Expect(Token::LBRACE, CHECK_OK);
scope->set_start_position(scanner()->location().beg_pos);
- scope->SetStrictMode(STRICT);
+ scope->SetLanguageMode(
+ static_cast<LanguageMode>(scope->language_mode() | STRICT_BIT));
{
BlockState block_state(&scope_, scope);
- TargetCollector collector(zone());
- Target target(&this->target_stack_, &collector);
- Target target_body(&this->target_stack_, body);
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseModuleElement(NULL, CHECK_OK);
+ while (peek() != Token::EOS) {
+ Statement* stat = ParseModuleItem(CHECK_OK);
if (stat && !stat->IsEmpty()) {
body->AddStatement(stat, zone());
}
}
}
- Expect(Token::RBRACE, CHECK_OK);
scope->set_end_position(scanner()->location().end_pos);
body->set_scope(scope);
// Check that all exports are bound.
- Interface* interface = scope->interface();
- for (Interface::Iterator it = interface->iterator();
- !it.done(); it.Advance()) {
+ ModuleDescriptor* descriptor = scope->module();
+ for (ModuleDescriptor::Iterator it = descriptor->iterator(); !it.done();
+ it.Advance()) {
if (scope->LookupLocal(it.name()) == NULL) {
ParserTraits::ReportMessage("module_export_undefined", it.name());
*ok = false;
@@ -1330,196 +1297,284 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
}
}
- interface->MakeModule(ok);
- DCHECK(*ok);
- interface->Freeze(ok);
- DCHECK(*ok);
- return factory()->NewModuleLiteral(body, interface, pos);
+ scope->module()->Freeze();
+ return body;
}
-Module* Parser::ParseModulePath(bool* ok) {
- // ModulePath:
- // Identifier
- // ModulePath '.' Identifier
+Literal* Parser::ParseModuleSpecifier(bool* ok) {
+ // ModuleSpecifier :
+ // StringLiteral
int pos = peek_position();
- Module* result = ParseModuleVariable(CHECK_OK);
- while (Check(Token::PERIOD)) {
- const AstRawString* name = ParseIdentifierName(CHECK_OK);
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Path .%.*s ", name->length(), name->raw_data());
-#endif
- Module* member = factory()->NewModulePath(result, name, pos);
- result->interface()->Add(name, member->interface(), zone(), ok);
- if (!*ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("PATH TYPE ERROR at '%.*s'\n", name->length(), name->raw_data());
- PrintF("result: ");
- result->interface()->Print();
- PrintF("member: ");
- member->interface()->Print();
- }
-#endif
- ParserTraits::ReportMessage("invalid_module_path", name);
- return NULL;
- }
- result = member;
- }
-
- return result;
+ Expect(Token::STRING, CHECK_OK);
+ return factory()->NewStringLiteral(GetSymbol(scanner()), pos);
}
-Module* Parser::ParseModuleVariable(bool* ok) {
- // ModulePath:
- // Identifier
-
- int pos = peek_position();
- const AstRawString* name =
- ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Module variable %.*s ", name->length(), name->raw_data());
-#endif
- VariableProxy* proxy = scope_->NewUnresolved(
- factory(), name, Interface::NewModule(zone()),
- scanner()->location().beg_pos);
+void* Parser::ParseExportClause(ZoneList<const AstRawString*>* names,
+ Scanner::Location* reserved_loc, bool* ok) {
+ // ExportClause :
+ // '{' '}'
+ // '{' ExportsList '}'
+ // '{' ExportsList ',' '}'
+ //
+ // ExportsList :
+ // ExportSpecifier
+ // ExportsList ',' ExportSpecifier
+ //
+ // ExportSpecifier :
+ // IdentifierName
+ // IdentifierName 'as' IdentifierName
- return factory()->NewModuleVariable(proxy, pos);
-}
+ Expect(Token::LBRACE, CHECK_OK);
+ Token::Value name_tok;
+ while ((name_tok = peek()) != Token::RBRACE) {
+ // Keep track of the first reserved word encountered in case our
+ // caller needs to report an error.
+ if (!reserved_loc->IsValid() &&
+ !Token::IsIdentifier(name_tok, STRICT, false)) {
+ *reserved_loc = scanner()->location();
+ }
+ const AstRawString* name = ParseIdentifierName(CHECK_OK);
+ names->Add(name, zone());
+ const AstRawString* export_name = NULL;
+ if (CheckContextualKeyword(CStrVector("as"))) {
+ export_name = ParseIdentifierName(CHECK_OK);
+ }
+ // TODO(ES6): Return the export_name as well as the name.
+ USE(export_name);
+ if (peek() == Token::RBRACE) break;
+ Expect(Token::COMMA, CHECK_OK);
+ }
-Module* Parser::ParseModuleUrl(bool* ok) {
- // Module:
- // String
+ Expect(Token::RBRACE, CHECK_OK);
- int pos = peek_position();
- Expect(Token::STRING, CHECK_OK);
- const AstRawString* symbol = GetSymbol(scanner());
+ return 0;
+}
- // TODO(ES6): Request JS resource from environment...
-#ifdef DEBUG
- if (FLAG_print_interface_details) PrintF("# Url ");
-#endif
+void* Parser::ParseNamedImports(ZoneList<const AstRawString*>* names,
+ bool* ok) {
+ // NamedImports :
+ // '{' '}'
+ // '{' ImportsList '}'
+ // '{' ImportsList ',' '}'
+ //
+ // ImportsList :
+ // ImportSpecifier
+ // ImportsList ',' ImportSpecifier
+ //
+ // ImportSpecifier :
+ // BindingIdentifier
+ // IdentifierName 'as' BindingIdentifier
- // Create an empty literal as long as the feature isn't finished.
- USE(symbol);
- Scope* scope = NewScope(scope_, MODULE_SCOPE);
- Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
- body->set_scope(scope);
- Interface* interface = scope->interface();
- Module* result = factory()->NewModuleLiteral(body, interface, pos);
- interface->Freeze(ok);
- DCHECK(*ok);
- interface->Unify(scope->interface(), zone(), ok);
- DCHECK(*ok);
- return result;
-}
+ Expect(Token::LBRACE, CHECK_OK);
+ Token::Value name_tok;
+ while ((name_tok = peek()) != Token::RBRACE) {
+ const AstRawString* name = ParseIdentifierName(CHECK_OK);
+ const AstRawString* import_name = NULL;
+ // In the presence of 'as', the left-side of the 'as' can
+ // be any IdentifierName. But without 'as', it must be a valid
+ // BindingIdentiifer.
+ if (CheckContextualKeyword(CStrVector("as"))) {
+ import_name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
+ } else if (!Token::IsIdentifier(name_tok, STRICT, false)) {
+ *ok = false;
+ ReportMessageAt(scanner()->location(), "unexpected_reserved");
+ return NULL;
+ } else if (IsEvalOrArguments(name)) {
+ *ok = false;
+ ReportMessageAt(scanner()->location(), "strict_eval_arguments");
+ return NULL;
+ }
+ // TODO(ES6): Return the import_name as well as the name.
+ names->Add(name, zone());
+ USE(import_name);
+ if (peek() == Token::RBRACE) break;
+ Expect(Token::COMMA, CHECK_OK);
+ }
-Module* Parser::ParseModuleSpecifier(bool* ok) {
- // ModuleSpecifier:
- // String
- // ModulePath
+ Expect(Token::RBRACE, CHECK_OK);
- if (peek() == Token::STRING) {
- return ParseModuleUrl(ok);
- } else {
- return ParseModulePath(ok);
- }
+ return NULL;
}
-Block* Parser::ParseImportDeclaration(bool* ok) {
- // ImportDeclaration:
- // 'import' IdentifierName (',' IdentifierName)* 'from' ModuleSpecifier ';'
+Statement* Parser::ParseImportDeclaration(bool* ok) {
+ // ImportDeclaration :
+ // 'import' ImportClause 'from' ModuleSpecifier ';'
+ // 'import' ModuleSpecifier ';'
+ //
+ // ImportClause :
+ // NameSpaceImport
+ // NamedImports
+ // ImportedDefaultBinding
+ // ImportedDefaultBinding ',' NameSpaceImport
+ // ImportedDefaultBinding ',' NamedImports
//
- // TODO(ES6): implement destructuring ImportSpecifiers
+ // NameSpaceImport :
+ // '*' 'as' ImportedBinding
int pos = peek_position();
Expect(Token::IMPORT, CHECK_OK);
+
+ Token::Value tok = peek();
+
+ // 'import' ModuleSpecifier ';'
+ if (tok == Token::STRING) {
+ ParseModuleSpecifier(CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ return factory()->NewEmptyStatement(pos);
+ }
+
+ // Parse ImportedDefaultBinding if present.
+ const AstRawString* imported_default_binding = NULL;
+ if (tok != Token::MUL && tok != Token::LBRACE) {
+ imported_default_binding =
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
+ }
+
+ const AstRawString* module_instance_binding = NULL;
ZoneList<const AstRawString*> names(1, zone());
+ if (imported_default_binding == NULL || Check(Token::COMMA)) {
+ switch (peek()) {
+ case Token::MUL: {
+ Consume(Token::MUL);
+ ExpectContextualKeyword(CStrVector("as"), CHECK_OK);
+ module_instance_binding =
+ ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
+ break;
+ }
- const AstRawString* name = ParseIdentifierName(CHECK_OK);
- names.Add(name, zone());
- while (peek() == Token::COMMA) {
- Consume(Token::COMMA);
- name = ParseIdentifierName(CHECK_OK);
- names.Add(name, zone());
+ case Token::LBRACE:
+ ParseNamedImports(&names, CHECK_OK);
+ break;
+
+ default:
+ *ok = false;
+ ReportUnexpectedToken(scanner()->current_token());
+ return NULL;
+ }
}
ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
- Module* module = ParseModuleSpecifier(CHECK_OK);
+ Literal* module = ParseModuleSpecifier(CHECK_OK);
+ USE(module);
+
ExpectSemicolon(CHECK_OK);
- // Generate a separate declaration for each identifier.
- // TODO(ES6): once we implement destructuring, make that one declaration.
- Block* block = factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ if (module_instance_binding != NULL) {
+ // TODO(ES6): Bind name to the Module Instance Object of module.
+ }
+
+ if (imported_default_binding != NULL) {
+ // TODO(ES6): Add an appropriate declaration.
+ }
+
for (int i = 0; i < names.length(); ++i) {
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Import %.*s ", name->length(), name->raw_data());
-#endif
- Interface* interface = Interface::NewUnknown(zone());
- module->interface()->Add(names[i], interface, zone(), ok);
- if (!*ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("IMPORT TYPE ERROR at '%.*s'\n", name->length(),
- name->raw_data());
- PrintF("module: ");
- module->interface()->Print();
- }
-#endif
- ParserTraits::ReportMessage("invalid_module_path", name);
- return NULL;
+ // TODO(ES6): Add an appropriate declaration for each name
+ }
+
+ return factory()->NewEmptyStatement(pos);
+}
+
+
+Statement* Parser::ParseExportDefault(bool* ok) {
+ // Supports the following productions, starting after the 'default' token:
+ // 'export' 'default' FunctionDeclaration
+ // 'export' 'default' ClassDeclaration
+ // 'export' 'default' AssignmentExpression[In] ';'
+
+ Statement* result = NULL;
+ switch (peek()) {
+ case Token::FUNCTION:
+ // TODO(ES6): Support parsing anonymous function declarations here.
+ result = ParseFunctionDeclaration(NULL, CHECK_OK);
+ break;
+
+ case Token::CLASS:
+ // TODO(ES6): Support parsing anonymous class declarations here.
+ result = ParseClassDeclaration(NULL, CHECK_OK);
+ break;
+
+ default: {
+ int pos = peek_position();
+ Expression* expr = ParseAssignmentExpression(true, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ result = factory()->NewExpressionStatement(expr, pos);
+ break;
}
- VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
- Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, scope_, pos);
- Declare(declaration, true, CHECK_OK);
}
- return block;
+ // TODO(ES6): Add default export to scope_->module()
+
+ return result;
}
Statement* Parser::ParseExportDeclaration(bool* ok) {
// ExportDeclaration:
- // 'export' Identifier (',' Identifier)* ';'
- // 'export' VariableDeclaration
- // 'export' FunctionDeclaration
- // 'export' GeneratorDeclaration
- // 'export' ModuleDeclaration
- //
- // TODO(ES6): implement structuring ExportSpecifiers
+ // 'export' '*' 'from' ModuleSpecifier ';'
+ // 'export' ExportClause ('from' ModuleSpecifier)? ';'
+ // 'export' VariableStatement
+ // 'export' Declaration
+ // 'export' 'default' ... (handled in ParseExportDefault)
+ int pos = peek_position();
Expect(Token::EXPORT, CHECK_OK);
Statement* result = NULL;
ZoneList<const AstRawString*> names(1, zone());
+ bool is_export_from = false;
switch (peek()) {
- case Token::IDENTIFIER: {
- int pos = position();
- const AstRawString* name =
- ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
- // Handle 'module' as a context-sensitive keyword.
- if (name != ast_value_factory()->module_string()) {
- names.Add(name, zone());
- while (peek() == Token::COMMA) {
- Consume(Token::COMMA);
- name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
- names.Add(name, zone());
- }
- ExpectSemicolon(CHECK_OK);
- result = factory()->NewEmptyStatement(pos);
- } else {
- result = ParseModuleDeclaration(&names, CHECK_OK);
+ case Token::DEFAULT:
+ Consume(Token::DEFAULT);
+ return ParseExportDefault(ok);
+
+ case Token::MUL: {
+ Consume(Token::MUL);
+ ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
+ Literal* module = ParseModuleSpecifier(CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ // TODO(ES6): Do something with the return value
+ // of ParseModuleSpecifier.
+ USE(module);
+ is_export_from = true;
+ result = factory()->NewEmptyStatement(pos);
+ break;
+ }
+
+ case Token::LBRACE: {
+ // There are two cases here:
+ //
+ // 'export' ExportClause ';'
+ // and
+ // 'export' ExportClause FromClause ';'
+ //
+ // In the first case, the exported identifiers in ExportClause must
+ // not be reserved words, while in the latter they may be. We
+ // pass in a location that gets filled with the first reserved word
+ // encountered, and then throw a SyntaxError if we are in the
+ // non-FromClause case.
+ Scanner::Location reserved_loc = Scanner::Location::invalid();
+ ParseExportClause(&names, &reserved_loc, CHECK_OK);
+ if (CheckContextualKeyword(CStrVector("from"))) {
+ Literal* module = ParseModuleSpecifier(CHECK_OK);
+ // TODO(ES6): Do something with the return value
+ // of ParseModuleSpecifier.
+ USE(module);
+ is_export_from = true;
+ } else if (reserved_loc.IsValid()) {
+ // No FromClause, so reserved words are invalid in ExportClause.
+ *ok = false;
+ ReportMessageAt(reserved_loc, "unexpected_reserved");
+ return NULL;
}
+ ExpectSemicolon(CHECK_OK);
+ result = factory()->NewEmptyStatement(pos);
break;
}
@@ -1534,7 +1589,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
case Token::VAR:
case Token::LET:
case Token::CONST:
- result = ParseVariableStatement(kModuleElement, &names, CHECK_OK);
+ result = ParseVariableStatement(kStatementListItem, &names, CHECK_OK);
break;
default:
@@ -1556,24 +1611,21 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
}
}
- // Extract declared names into export declarations and interface.
- Interface* interface = scope_->interface();
- for (int i = 0; i < names.length(); ++i) {
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Export %.*s ", names[i]->length(), names[i]->raw_data());
-#endif
- Interface* inner = Interface::NewUnknown(zone());
- interface->Add(names[i], inner, zone(), CHECK_OK);
- if (!*ok)
- return NULL;
- VariableProxy* proxy = NewUnresolved(names[i], LET, inner);
- USE(proxy);
- // TODO(rossberg): Rethink whether we actually need to store export
- // declarations (for compilation?).
- // ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, scope_, position);
- // scope_->AddDeclaration(declaration);
+ // TODO(ES6): Handle 'export from' once imports are properly implemented.
+ // For now we just drop such exports on the floor.
+ if (!is_export_from) {
+ // Extract declared names into export declarations and module descriptor.
+ ModuleDescriptor* descriptor = scope_->module();
+ for (int i = 0; i < names.length(); ++i) {
+ // TODO(adamk): Make early errors here provide the right error message
+ // (duplicate exported names).
+ descriptor->Add(names[i], zone(), CHECK_OK);
+ // TODO(rossberg): Rethink whether we actually need to store export
+ // declarations (for compilation?).
+ // ExportDeclaration* declaration =
+ // factory()->NewExportDeclaration(proxy, scope_, position);
+ // scope_->AddDeclaration(declaration);
+ }
}
DCHECK(result != NULL);
@@ -1581,41 +1633,22 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
}
-Statement* Parser::ParseBlockElement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- //
- // In harmony mode we allow additionally the following productions
- // BlockElement (aka SourceElement):
- // LetDeclaration
- // ConstDeclaration
- // GeneratorDeclaration
- // ClassDeclaration
+Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok) {
+ // Statement ::
+ // EmptyStatement
+ // ...
- switch (peek()) {
- case Token::FUNCTION:
- return ParseFunctionDeclaration(NULL, ok);
- case Token::CLASS:
- return ParseClassDeclaration(NULL, ok);
- case Token::CONST:
- return ParseVariableStatement(kModuleElement, NULL, ok);
- case Token::LET:
- DCHECK(allow_harmony_scoping());
- if (strict_mode() == STRICT) {
- return ParseVariableStatement(kModuleElement, NULL, ok);
- }
- // Fall through.
- default:
- return ParseStatement(labels, ok);
+ if (peek() == Token::SEMICOLON) {
+ Next();
+ return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
+ return ParseSubStatement(labels, ok);
}
-Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
- bool* ok) {
+Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok) {
// Statement ::
// Block
// VariableStatement
@@ -1644,6 +1677,11 @@ Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
return ParseBlock(labels, ok);
case Token::SEMICOLON:
+ if (is_strong(language_mode())) {
+ ReportMessageAt(scanner()->peek_location(), "strong_empty");
+ *ok = false;
+ return NULL;
+ }
Next();
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1703,7 +1741,7 @@ Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
// In Harmony mode, this case also handles the extension:
// Statement:
// GeneratorDeclaration
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
ReportMessageAt(scanner()->peek_location(), "strict_function");
*ok = false;
return NULL;
@@ -1711,22 +1749,21 @@ Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
return ParseFunctionDeclaration(NULL, ok);
}
- case Token::CLASS:
- return ParseClassDeclaration(NULL, ok);
-
case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
case Token::VAR:
- case Token::CONST:
return ParseVariableStatement(kStatement, NULL, ok);
- case Token::LET:
- DCHECK(allow_harmony_scoping());
- if (strict_mode() == STRICT) {
+ case Token::CONST:
+ // In ES6 CONST is not allowed as a Statement, only as a
+ // LexicalDeclaration, however we continue to allow it in sloppy mode for
+ // backwards compatibility.
+ if (is_sloppy(language_mode())) {
return ParseVariableStatement(kStatement, NULL, ok);
}
- // Fall through.
+
+ // Fall through.
default:
return ParseExpressionOrLabelledStatement(labels, ok);
}
@@ -1734,14 +1771,13 @@ Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
VariableProxy* Parser::NewUnresolved(const AstRawString* name,
- VariableMode mode, Interface* interface) {
+ VariableMode mode) {
// If we are inside a function, a declaration of a var/const variable is a
// truly local variable, and the scope of the variable is always the function
// scope.
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
- return DeclarationScope(mode)->NewUnresolved(
- factory(), name, interface, position());
+ return DeclarationScope(mode)->NewUnresolved(factory(), name, position());
}
@@ -1770,9 +1806,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
var = declaration_scope->LookupLocal(name);
if (var == NULL) {
// Declare the name.
- var = declaration_scope->DeclareLocal(name, mode,
- declaration->initialization(),
- kNotAssigned, proxy->interface());
+ var = declaration_scope->DeclareLocal(
+ name, mode, declaration->initialization(), kNotAssigned);
} else if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode())
|| ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
!declaration_scope->is_script_scope())) {
@@ -1790,7 +1825,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
DCHECK(IsDeclaredVariableMode(var->mode()));
- if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ if (allow_harmony_scoping() && is_strict(language_mode())) {
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
ParserTraits::ReportMessage("var_redeclaration", name);
@@ -1827,19 +1862,17 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// For global const variables we bind the proxy to a variable.
DCHECK(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new (zone())
- Variable(declaration_scope, name, mode, true, kind,
- kNeedsInitialization, kNotAssigned, proxy->interface());
+ var = new (zone()) Variable(declaration_scope, name, mode, true, kind,
+ kNeedsInitialization, kNotAssigned);
} else if (declaration_scope->is_eval_scope() &&
- declaration_scope->strict_mode() == SLOPPY) {
+ is_sloppy(declaration_scope->language_mode())) {
// For variable declarations in a sloppy eval scope the proxy is bound
// to a lookup variable to force a dynamic declaration using the
// DeclareLookupSlot runtime function.
Variable::Kind kind = Variable::NORMAL;
// TODO(sigurds) figure out if kNotAssigned is OK here
var = new (zone()) Variable(declaration_scope, name, mode, true, kind,
- declaration->initialization(), kNotAssigned,
- proxy->interface());
+ declaration->initialization(), kNotAssigned);
var->AllocateTo(Variable::LOOKUP, -1);
resolve = true;
}
@@ -1870,29 +1903,6 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// runtime needs to provide both.
if (resolve && var != NULL) {
proxy->BindTo(var);
-
- if (FLAG_harmony_modules) {
- bool ok;
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("# Declare %.*s ", var->raw_name()->length(),
- var->raw_name()->raw_data());
- }
-#endif
- proxy->interface()->Unify(var->interface(), zone(), &ok);
- if (!ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("DECLARE TYPE ERROR\n");
- PrintF("proxy: ");
- proxy->interface()->Print();
- PrintF("var: ");
- var->interface()->Print();
- }
-#endif
- ParserTraits::ReportMessage("module_type_error", name);
- }
- }
}
}
@@ -1927,7 +1937,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
- VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
+ VariableProxy* proxy = NewUnresolved(name, VAR);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
Declare(declaration, true, CHECK_OK);
@@ -1965,10 +1975,13 @@ Statement* Parser::ParseFunctionDeclaration(
// In ES6, a function behaves as a lexical binding, except in
// a script scope, or the initial scope of eval or another function.
VariableMode mode =
- allow_harmony_scoping() && strict_mode() == STRICT &&
- !(scope_->is_script_scope() || scope_->is_eval_scope() ||
- scope_->is_function_scope()) ? LET : VAR;
- VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
+ is_strong(language_mode()) ? CONST :
+ allow_harmony_scoping() && is_strict(language_mode()) &&
+ !(scope_->is_script_scope() || scope_->is_eval_scope() ||
+ scope_->is_function_scope())
+ ? LET
+ : VAR;
+ VariableProxy* proxy = NewUnresolved(name, mode);
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
Declare(declaration, true, CHECK_OK);
@@ -1993,7 +2006,7 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
// so rewrite it as such.
Expect(Token::CLASS, CHECK_OK);
- if (!allow_harmony_sloppy() && strict_mode() == SLOPPY) {
+ if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
ReportMessage("sloppy_lexical");
*ok = false;
return NULL;
@@ -2006,13 +2019,15 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
ClassLiteral* value = ParseClassLiteral(name, scanner()->location(),
is_strict_reserved, pos, CHECK_OK);
- VariableProxy* proxy = NewUnresolved(name, LET, Interface::NewValue());
+ VariableMode mode = is_strong(language_mode()) ? CONST : LET;
+ VariableProxy* proxy = NewUnresolved(name, mode);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, LET, scope_, pos);
+ factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
Declare(declaration, true, CHECK_OK);
proxy->var()->set_initializer_position(pos);
- Token::Value init_op = Token::INIT_LET;
+ Token::Value init_op =
+ is_strong(language_mode()) ? Token::INIT_CONST : Token::INIT_LET;
Assignment* assignment = factory()->NewAssignment(init_op, proxy, value, pos);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
@@ -2022,7 +2037,7 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
- if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ if (allow_harmony_scoping() && is_strict(language_mode())) {
return ParseScopedBlock(labels, ok);
}
@@ -2053,7 +2068,7 @@ Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
// The harmony mode uses block elements instead of statements.
//
// Block ::
- // '{' BlockElement* '}'
+ // '{' StatementList '}'
// Construct block expecting 16 statements.
Block* body =
@@ -2064,12 +2079,10 @@ Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
Expect(Token::LBRACE, CHECK_OK);
block_scope->set_start_position(scanner()->location().beg_pos);
{ BlockState block_state(&scope_, block_scope);
- TargetCollector collector(zone());
- Target target(&this->target_stack_, &collector);
- Target target_body(&this->target_stack_, body);
+ Target target(&this->target_stack_, body);
while (peek() != Token::RBRACE) {
- Statement* stat = ParseBlockElement(NULL, CHECK_OK);
+ Statement* stat = ParseStatementListItem(CHECK_OK);
if (stat && !stat->IsEmpty()) {
body->AddStatement(stat, zone());
}
@@ -2132,52 +2145,35 @@ Block* Parser::ParseVariableDeclarations(
bool is_const = false;
Token::Value init_op = Token::INIT_VAR;
if (peek() == Token::VAR) {
+ if (is_strong(language_mode())) {
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location, "strong_var");
+ *ok = false;
+ return NULL;
+ }
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
- // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
- //
- // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
- //
- // * It is a Syntax Error if the code that matches this production is not
- // contained in extended code.
- //
- // However disallowing const in sloppy mode will break compatibility with
- // existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- switch (strict_mode()) {
- case SLOPPY:
- mode = CONST_LEGACY;
- init_op = Token::INIT_CONST_LEGACY;
- break;
- case STRICT:
- if (allow_harmony_scoping()) {
- if (var_context == kStatement) {
- // In strict mode 'const' declarations are only allowed in source
- // element positions.
- ReportMessage("unprotected_const");
- *ok = false;
- return NULL;
- }
- mode = CONST;
- init_op = Token::INIT_CONST;
- } else {
- ReportMessage("strict_const");
- *ok = false;
- return NULL;
- }
+ if (is_sloppy(language_mode())) {
+ mode = CONST_LEGACY;
+ init_op = Token::INIT_CONST_LEGACY;
+ } else {
+ DCHECK(var_context != kStatement);
+ // In ES5 const is not allowed in strict mode.
+ if (!allow_harmony_scoping()) {
+ ReportMessage("strict_const");
+ *ok = false;
+ return NULL;
+ }
+ mode = CONST;
+ init_op = Token::INIT_CONST;
}
is_const = true;
needs_init = true;
- } else if (peek() == Token::LET && strict_mode() == STRICT) {
+ } else if (peek() == Token::LET && is_strict(language_mode())) {
DCHECK(allow_harmony_scoping());
Consume(Token::LET);
- if (var_context == kStatement) {
- // Let declarations are only allowed in source element positions.
- ReportMessage("unprotected_let");
- *ok = false;
- return NULL;
- }
+ DCHECK(var_context != kStatement);
mode = LET;
needs_init = true;
init_op = Token::INIT_LET;
@@ -2234,9 +2230,7 @@ Block* Parser::ParseVariableDeclarations(
needs_init = false;
}
- Interface* interface =
- is_const ? Interface::NewConst() : Interface::NewValue();
- VariableProxy* proxy = NewUnresolved(name, mode, interface);
+ VariableProxy* proxy = NewUnresolved(name, mode);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
@@ -2345,10 +2339,10 @@ Block* Parser::ParseVariableDeclarations(
Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments,
pos);
} else {
- // Add strict mode.
+ // Add language mode.
// We may want to pass singleton to avoid Literal allocations.
- StrictMode strict_mode = initialization_scope->strict_mode();
- arguments->Add(factory()->NewNumberLiteral(strict_mode, pos), zone());
+ LanguageMode language_mode = initialization_scope->language_mode();
+ arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -2400,7 +2394,7 @@ Block* Parser::ParseVariableDeclarations(
// if they are inside a 'with' statement - they may change a 'with' object
// property).
VariableProxy* proxy =
- initialization_scope->NewUnresolved(factory(), name, interface);
+ initialization_scope->NewUnresolved(factory(), name);
Assignment* assignment =
factory()->NewAssignment(init_op, proxy, value, pos);
block->AddStatement(
@@ -2440,6 +2434,26 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
+ //
+ // ExpressionStatement[Yield] :
+ // [lookahead ∉ {{, function, class, let [}] Expression[In, ?Yield] ;
+
+ switch (peek()) {
+ case Token::FUNCTION:
+ case Token::LBRACE:
+ UNREACHABLE(); // Always handled by the callers.
+ case Token::CLASS:
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return nullptr;
+
+ // TODO(arv): Handle `let [`
+ // https://code.google.com/p/v8/issues/detail?id=3847
+
+ default:
+ break;
+ }
+
int pos = peek_position();
bool starts_with_idenfifier = peek_any_identifier();
Expression* expr = ParseExpression(true, CHECK_OK);
@@ -2484,24 +2498,16 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
return ParseNativeDeclaration(ok);
}
- // Parsed expression statement, or the context-sensitive 'module' keyword.
- // Only expect semicolon in the former case.
- // Also detect attempts at 'let' declarations in sloppy mode.
- if (!FLAG_harmony_modules || peek() != Token::IDENTIFIER ||
- scanner()->HasAnyLineTerminatorBeforeNext() ||
- expr->AsVariableProxy() == NULL ||
- expr->AsVariableProxy()->raw_name() !=
- ast_value_factory()->module_string() ||
- scanner()->literal_contains_escapes()) {
- if (peek() == Token::IDENTIFIER && expr->AsVariableProxy() != NULL &&
- expr->AsVariableProxy()->raw_name() ==
- ast_value_factory()->let_string()) {
- ReportMessage("sloppy_lexical", NULL);
- *ok = false;
- return NULL;
- }
- ExpectSemicolon(CHECK_OK);
+ // Parsed expression statement, followed by semicolon.
+ // Detect attempts at 'let' declarations in sloppy mode.
+ if (peek() == Token::IDENTIFIER && expr->AsVariableProxy() != NULL &&
+ expr->AsVariableProxy()->raw_name() ==
+ ast_value_factory()->let_string()) {
+ ReportMessage("sloppy_lexical", NULL);
+ *ok = false;
+ return NULL;
}
+ ExpectSemicolon(CHECK_OK);
return factory()->NewExpressionStatement(expr, pos);
}
@@ -2516,11 +2522,11 @@ IfStatement* Parser::ParseIfStatement(ZoneList<const AstRawString*>* labels,
Expect(Token::LPAREN, CHECK_OK);
Expression* condition = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Statement* then_statement = ParseStatement(labels, CHECK_OK);
+ Statement* then_statement = ParseSubStatement(labels, CHECK_OK);
Statement* else_statement = NULL;
if (peek() == Token::ELSE) {
Next();
- else_statement = ParseStatement(labels, CHECK_OK);
+ else_statement = ParseSubStatement(labels, CHECK_OK);
} else {
else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
@@ -2612,11 +2618,16 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
- return_value = GetLiteralUndefined(position());
+ if (IsSubclassConstructor(function_state_->kind())) {
+ return_value = ThisExpression(scope_, factory(), loc.beg_pos);
+ } else {
+ return_value = GetLiteralUndefined(position());
+ }
} else {
return_value = ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
+
if (is_generator()) {
Expression* generator = factory()->NewVariableProxy(
function_state_->generator_object_variable());
@@ -2645,7 +2656,7 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
Expect(Token::WITH, CHECK_OK);
int pos = position();
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
ReportMessage("strict_mode_with");
*ok = false;
return NULL;
@@ -2660,7 +2671,7 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
Statement* stmt;
{ BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
- stmt = ParseStatement(labels, CHECK_OK);
+ stmt = ParseSubStatement(labels, CHECK_OK);
with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, stmt, pos);
@@ -2762,12 +2773,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::TRY, CHECK_OK);
int pos = position();
- TargetCollector try_collector(zone());
- Block* try_block;
-
- { Target target(&this->target_stack_, &try_collector);
- try_block = ParseBlock(NULL, CHECK_OK);
- }
+ Block* try_block = ParseBlock(NULL, CHECK_OK);
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
@@ -2776,11 +2782,6 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
return NULL;
}
- // If we can break out from the catch block and there is a finally block,
- // then we will need to collect escaping targets from the catch
- // block. Since we don't know yet if there will be a finally block, we
- // always collect the targets.
- TargetCollector catch_collector(zone());
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
@@ -2795,7 +2796,6 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
- Target target(&this->target_stack_, &catch_collector);
catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized);
BlockState block_state(&scope_, catch_scope);
catch_block = ParseBlock(NULL, CHECK_OK);
@@ -2823,7 +2823,6 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block,
RelocInfo::kNoPosition);
- statement->set_escaping_targets(try_collector.targets());
try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
@@ -2841,11 +2840,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
int index = function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(
index, try_block, finally_block, pos);
- // Combine the jump targets of the try block and the possible catch block.
- try_collector.targets()->AddAll(*catch_collector.targets(), zone());
}
- result->set_escaping_targets(try_collector.targets());
return result;
}
@@ -2860,7 +2856,7 @@ DoWhileStatement* Parser::ParseDoWhileStatement(
Target target(&this->target_stack_, loop);
Expect(Token::DO, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -2890,26 +2886,13 @@ WhileStatement* Parser::ParseWhileStatement(
Expect(Token::LPAREN, CHECK_OK);
Expression* cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
if (loop != NULL) loop->Initialize(cond, body);
return loop;
}
-bool Parser::CheckInOrOf(bool accept_OF,
- ForEachStatement::VisitMode* visit_mode) {
- if (Check(Token::IN)) {
- *visit_mode = ForEachStatement::ENUMERATE;
- return true;
- } else if (accept_OF && CheckContextualKeyword(CStrVector("of"))) {
- *visit_mode = ForEachStatement::ITERATE;
- return true;
- }
- return false;
-}
-
-
void Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
Expression* subject,
@@ -3034,8 +3017,7 @@ Statement* Parser::DesugarLetBindingsInForStatement(
// For each let variable x:
// make statement: temp_x = x.
for (int i = 0; i < names->length(); i++) {
- VariableProxy* proxy =
- NewUnresolved(names->at(i), LET, Interface::NewValue());
+ VariableProxy* proxy = NewUnresolved(names->at(i), LET);
Variable* temp = scope_->DeclarationScope()->NewTemporary(temp_name);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
Assignment* assignment = factory()->NewAssignment(
@@ -3079,8 +3061,7 @@ Statement* Parser::DesugarLetBindingsInForStatement(
// For each let variable x:
// make statement: let x = temp_x.
for (int i = 0; i < names->length(); i++) {
- VariableProxy* proxy =
- NewUnresolved(names->at(i), LET, Interface::NewValue());
+ VariableProxy* proxy = NewUnresolved(names->at(i), LET);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, LET, scope_, pos);
Declare(declaration, true, CHECK_OK);
@@ -3230,8 +3211,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
bool is_let_identifier_expression = false;
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR ||
- (peek() == Token::CONST && strict_mode() == SLOPPY)) {
- bool is_const = peek() == Token::CONST;
+ (peek() == Token::CONST && is_sloppy(language_mode()))) {
const AstRawString* name = NULL;
VariableDeclarationProperties decl_props = kHasNoInitializers;
Block* variable_statement =
@@ -3241,9 +3221,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
ForEachStatement::VisitMode mode;
int each_pos = position();
- if (name != NULL && CheckInOrOf(accept_OF, &mode)) {
- Interface* interface =
- is_const ? Interface::NewConst() : Interface::NewValue();
+ if (name != NULL && CheckInOrOf(accept_OF, &mode, ok)) {
+ if (!*ok) return nullptr;
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
Target target(&this->target_stack_, loop);
@@ -3251,9 +3230,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- VariableProxy* each =
- scope_->NewUnresolved(factory(), name, interface, each_pos);
- Statement* body = ParseStatement(NULL, CHECK_OK);
+ VariableProxy* each = scope_->NewUnresolved(factory(), name, each_pos);
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
Block* result =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
@@ -3269,7 +3247,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
init = variable_statement;
}
} else if ((peek() == Token::LET || peek() == Token::CONST) &&
- strict_mode() == STRICT) {
+ is_strict(language_mode())) {
bool is_const = peek() == Token::CONST;
const AstRawString* name = NULL;
VariableDeclarationProperties decl_props = kHasNoInitializers;
@@ -3281,7 +3259,9 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
ForEachStatement::VisitMode mode;
int each_pos = position();
- if (accept_IN && CheckInOrOf(accept_OF, &mode)) {
+ if (accept_IN && CheckInOrOf(accept_OF, &mode, ok)) {
+ if (!*ok) return nullptr;
+
// Rewrite a for-in statement of the form
//
// for (let/const x in e) b
@@ -3310,9 +3290,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
- VariableProxy* each = scope_->NewUnresolved(
- factory(), name, Interface::NewValue(), each_pos);
- Statement* body = ParseStatement(NULL, CHECK_OK);
+ VariableProxy* each = scope_->NewUnresolved(factory(), name, each_pos);
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
Token::Value init_op = is_const ? Token::INIT_CONST : Token::ASSIGN;
@@ -3344,7 +3323,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
expression->AsVariableProxy()->raw_name() ==
ast_value_factory()->let_string();
- if (CheckInOrOf(accept_OF, &mode)) {
+ if (CheckInOrOf(accept_OF, &mode, ok)) {
+ if (!*ok) return nullptr;
expression = this->CheckAndRewriteReferenceExpression(
expression, lhs_location, "invalid_lhs_in_for", CHECK_OK);
@@ -3355,7 +3335,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, expression, enumerable, body);
scope_ = saved_scope;
for_scope->set_end_position(scanner()->location().end_pos);
@@ -3376,7 +3356,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// Parsed initializer at this point.
// Detect attempts at 'let' declarations in sloppy mode.
- if (peek() == Token::IDENTIFIER && strict_mode() == SLOPPY &&
+ if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
is_let_identifier_expression) {
ReportMessage("sloppy_lexical", NULL);
*ok = false;
@@ -3407,7 +3387,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseStatement(NULL, CHECK_OK);
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
Statement* result = NULL;
if (let_bindings.length() > 0) {
@@ -3560,6 +3540,10 @@ int ParserTraits::DeclareArrowParametersFromExpression(
Expression* expression, Scope* scope, Scanner::Location* dupe_loc,
bool* ok) {
int num_params = 0;
+ // Always reset the flag: It only needs to be set for the first expression
+ // parsed as arrow function parameter list, because only top-level functions
+ // are parsed lazily.
+ parser_->parsing_lazy_arrow_parameters_ = false;
*ok = CheckAndDeclareArrowParameter(this, expression, scope, &num_params,
dupe_loc);
return num_params;
@@ -3627,11 +3611,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
function_type == FunctionLiteral::DECLARATION &&
- (!allow_harmony_scoping() || strict_mode() == SLOPPY) &&
- (original_scope_ == original_declaration_scope ||
- declaration_scope != original_declaration_scope)
- ? NewScope(declaration_scope, FUNCTION_SCOPE)
- : NewScope(scope_, FUNCTION_SCOPE);
+ (!allow_harmony_scoping() || is_sloppy(language_mode())) &&
+ (original_scope_ == original_declaration_scope ||
+ declaration_scope != original_declaration_scope)
+ ? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
+ : NewScope(scope_, FUNCTION_SCOPE, kind);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -3644,7 +3628,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Parse function body.
{
AstNodeFactory function_factory(ast_value_factory());
- FunctionState function_state(&function_state_, &scope_, scope,
+ FunctionState function_state(&function_state_, &scope_, scope, kind,
&function_factory);
scope_->SetScopeName(function_name);
@@ -3670,32 +3654,39 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// We don't yet know if the function will be strict, so we cannot yet
// produce errors for parameter names or duplicates. However, we remember
// the locations of these errors if they occur and produce the errors later.
- Scanner::Location eval_args_error_log = Scanner::Location::invalid();
+ Scanner::Location eval_args_error_loc = Scanner::Location::invalid();
Scanner::Location dupe_error_loc = Scanner::Location::invalid();
- Scanner::Location reserved_loc = Scanner::Location::invalid();
+ Scanner::Location reserved_error_loc = Scanner::Location::invalid();
+ bool is_rest = false;
bool done = arity_restriction == FunctionLiteral::GETTER_ARITY ||
(peek() == Token::RPAREN &&
arity_restriction != FunctionLiteral::SETTER_ARITY);
while (!done) {
bool is_strict_reserved = false;
+ is_rest = peek() == Token::ELLIPSIS && allow_harmony_rest_params();
+ if (is_rest) {
+ Consume(Token::ELLIPSIS);
+ }
+
const AstRawString* param_name =
ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
// Store locations for possible future error reports.
- if (!eval_args_error_log.IsValid() && IsEvalOrArguments(param_name)) {
- eval_args_error_log = scanner()->location();
+ if (!eval_args_error_loc.IsValid() && IsEvalOrArguments(param_name)) {
+ eval_args_error_loc = scanner()->location();
}
- if (!reserved_loc.IsValid() && is_strict_reserved) {
- reserved_loc = scanner()->location();
+ if (!reserved_error_loc.IsValid() && is_strict_reserved) {
+ reserved_error_loc = scanner()->location();
}
- if (!dupe_error_loc.IsValid() && scope_->IsDeclared(param_name)) {
+ if (!dupe_error_loc.IsValid() &&
+ scope_->IsDeclaredParameter(param_name)) {
duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
dupe_error_loc = scanner()->location();
}
- Variable* var = scope_->DeclareParameter(param_name, VAR);
- if (scope->strict_mode() == SLOPPY) {
+ Variable* var = scope_->DeclareParameter(param_name, VAR, is_rest);
+ if (is_sloppy(scope->language_mode())) {
// TODO(sigurds) Mark every parameter as maybe assigned. This is a
// conservative approximation necessary to account for parameters
// that are assigned via the arguments array.
@@ -3710,7 +3701,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (arity_restriction == FunctionLiteral::SETTER_ARITY) break;
done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
+ if (!done) {
+ if (is_rest) {
+ ReportMessageAt(scanner()->peek_location(), "param_after_rest");
+ *ok = false;
+ return NULL;
+ }
+ Expect(Token::COMMA, CHECK_OK);
+ }
}
Expect(Token::RPAREN, CHECK_OK);
@@ -3725,17 +3723,16 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Variable* fvar = NULL;
Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
- if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ if (allow_harmony_scoping() && is_strict(language_mode())) {
fvar_init_op = Token::INIT_CONST;
}
VariableMode fvar_mode =
- allow_harmony_scoping() && strict_mode() == STRICT
- ? CONST : CONST_LEGACY;
+ allow_harmony_scoping() && is_strict(language_mode()) ? CONST
+ : CONST_LEGACY;
DCHECK(function_name != NULL);
fvar = new (zone())
Variable(scope_, function_name, fvar_mode, true /* is valid LHS */,
- Variable::NORMAL, kCreatedInitialized, kNotAssigned,
- Interface::NewConst());
+ Variable::NORMAL, kCreatedInitialized, kNotAssigned);
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
@@ -3785,28 +3782,27 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
&expected_property_count, CHECK_OK);
} else {
body = ParseEagerFunctionBody(function_name, pos, fvar, fvar_init_op,
- is_generator, CHECK_OK);
+ kind, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
handler_count = function_state.handler_count();
}
- // Validate strict mode.
- // Concise methods use StrictFormalParameters.
- if (strict_mode() == STRICT || IsConciseMethod(kind)) {
- CheckStrictFunctionNameAndParameters(function_name,
- name_is_strict_reserved,
- function_name_location,
- eval_args_error_log,
- dupe_error_loc,
- reserved_loc,
- CHECK_OK);
- }
- if (strict_mode() == STRICT) {
+ // Validate name and parameter names. We can do this only after parsing the
+ // function, since the function can declare itself strict.
+ CheckFunctionName(language_mode(), kind, function_name,
+ name_is_strict_reserved, function_name_location,
+ CHECK_OK);
+ const bool use_strict_params = is_rest || IsConciseMethod(kind);
+ CheckFunctionParameterNames(language_mode(), use_strict_params,
+ eval_args_error_loc, dupe_error_loc,
+ reserved_error_loc, CHECK_OK);
+
+ if (is_strict(language_mode())) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
}
- if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ if (allow_harmony_scoping() && is_strict(language_mode())) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
}
@@ -3818,6 +3814,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral::kIsFunction, parenthesized, kind, pos);
function_literal->set_function_token_position(function_token_pos);
+ if (scope->has_rest_parameter()) {
+ // TODO(caitp): enable optimization of functions with rest params
+ function_literal->set_dont_optimize_reason(kRestParameter);
+ }
+
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
}
@@ -3849,7 +3850,8 @@ void Parser::SkipLazyFunctionBody(const AstRawString* function_name,
total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
- scope_->SetStrictMode(entry.strict_mode());
+ scope_->SetLanguageMode(entry.language_mode());
+ if (entry.uses_super_property()) scope_->RecordSuperPropertyUsage();
return;
}
cached_parse_data_->Reject();
@@ -3880,27 +3882,48 @@ void Parser::SkipLazyFunctionBody(const AstRawString* function_name,
total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = logger.literals();
*expected_property_count = logger.properties();
- scope_->SetStrictMode(logger.strict_mode());
+ scope_->SetLanguageMode(logger.language_mode());
+ if (logger.scope_uses_super_property()) {
+ scope_->RecordSuperPropertyUsage();
+ }
if (produce_cached_parse_data()) {
DCHECK(log_);
// Position right after terminal '}'.
int body_end = scanner()->location().end_pos;
log_->LogFunction(function_block_pos, body_end, *materialized_literal_count,
- *expected_property_count, scope_->strict_mode());
+ *expected_property_count, scope_->language_mode(),
+ scope_->uses_super_property());
}
}
+void Parser::AddAssertIsConstruct(ZoneList<Statement*>* body, int pos) {
+ ZoneList<Expression*>* arguments =
+ new (zone()) ZoneList<Expression*>(0, zone());
+ CallRuntime* construct_check = factory()->NewCallRuntime(
+ ast_value_factory()->is_construct_call_string(),
+ Runtime::FunctionForId(Runtime::kInlineIsConstructCall), arguments, pos);
+ CallRuntime* non_callable_error = factory()->NewCallRuntime(
+ ast_value_factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kThrowConstructorNonCallableError),
+ arguments, pos);
+ IfStatement* if_statement = factory()->NewIfStatement(
+ factory()->NewUnaryOperation(Token::NOT, construct_check, pos),
+ factory()->NewReturnStatement(non_callable_error, pos),
+ factory()->NewEmptyStatement(pos), pos);
+ body->Add(if_statement, zone());
+}
+
+
ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
const AstRawString* function_name, int pos, Variable* fvar,
- Token::Value fvar_init_op, bool is_generator, bool* ok) {
+ Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
// Everything inside an eagerly parsed function will be parsed eagerly
// (see comment above).
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
- VariableProxy* fproxy = scope_->NewUnresolved(
- factory(), function_name, Interface::NewConst());
+ VariableProxy* fproxy = scope_->NewUnresolved(factory(), function_name);
fproxy->BindTo(fvar);
body->Add(factory()->NewExpressionStatement(
factory()->NewAssignment(fvar_init_op,
@@ -3910,8 +3933,15 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
RelocInfo::kNoPosition), zone());
}
+
+ // For concise constructors, check that they are constructed,
+ // not called.
+ if (i::IsConstructor(kind)) {
+ AddAssertIsConstruct(body, pos);
+ }
+
// For generators, allocate and yield an iterator on function entry.
- if (is_generator) {
+ if (IsGeneratorFunction(kind)) {
ZoneList<Expression*>* arguments =
new(zone()) ZoneList<Expression*>(0, zone());
CallRuntime* allocation = factory()->NewCallRuntime(
@@ -3930,9 +3960,9 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
yield, RelocInfo::kNoPosition), zone());
}
- ParseSourceElements(body, Token::RBRACE, false, false, NULL, CHECK_OK);
+ ParseStatementList(body, Token::RBRACE, false, NULL, CHECK_OK);
- if (is_generator) {
+ if (IsGeneratorFunction(kind)) {
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Expression* undefined =
@@ -3943,6 +3973,14 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
yield, RelocInfo::kNoPosition), zone());
}
+ if (IsSubclassConstructor(kind)) {
+ body->Add(
+ factory()->NewReturnStatement(
+ this->ThisExpression(scope_, factory(), RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+ }
+
Expect(Token::RBRACE, CHECK_OK);
scope_->set_end_position(scanner()->location().end_pos);
@@ -3960,7 +3998,8 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
DCHECK_EQ(Token::LBRACE, scanner()->current_token());
if (reusable_preparser_ == NULL) {
- reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit_);
+ reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
+ NULL, stack_limit_);
reusable_preparser_->set_allow_lazy(true);
reusable_preparser_->set_allow_natives(allow_natives());
reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
@@ -3975,11 +4014,14 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
reusable_preparser_->set_allow_harmony_templates(allow_harmony_templates());
reusable_preparser_->set_allow_harmony_sloppy(allow_harmony_sloppy());
reusable_preparser_->set_allow_harmony_unicode(allow_harmony_unicode());
- }
- PreParser::PreParseResult result =
- reusable_preparser_->PreParseLazyFunction(strict_mode(),
- is_generator(),
- logger);
+ reusable_preparser_->set_allow_harmony_computed_property_names(
+ allow_harmony_computed_property_names());
+ reusable_preparser_->set_allow_harmony_rest_params(
+ allow_harmony_rest_params());
+ reusable_preparser_->set_allow_strong_mode(allow_strong_mode());
+ }
+ PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
+ language_mode(), function_state_->kind(), logger);
if (pre_parse_timer_ != NULL) {
pre_parse_timer_->Stop();
}
@@ -4005,12 +4047,13 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
BlockState block_state(&scope_, block_scope);
- scope_->SetStrictMode(STRICT);
+ scope_->SetLanguageMode(
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
scope_->SetScopeName(name);
VariableProxy* proxy = NULL;
if (name != NULL) {
- proxy = NewUnresolved(name, CONST, Interface::NewConst());
+ proxy = NewUnresolved(name, CONST);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, CONST, block_scope, pos);
Declare(declaration, true, CHECK_OK);
@@ -4024,21 +4067,28 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
block_scope->set_start_position(scanner()->location().end_pos);
}
+
+ ClassLiteralChecker checker(this);
ZoneList<ObjectLiteral::Property*>* properties = NewPropertyList(4, zone());
- Expression* constructor = NULL;
+ FunctionLiteral* constructor = NULL;
bool has_seen_constructor = false;
Expect(Token::LBRACE, CHECK_OK);
+ const bool has_extends = extends != nullptr;
while (peek() != Token::RBRACE) {
if (Check(Token::SEMICOLON)) continue;
if (fni_ != NULL) fni_->Enter();
const bool in_class = true;
const bool is_static = false;
+ bool is_computed_name = false; // Classes do not care about computed
+ // property names here.
ObjectLiteral::Property* property = ParsePropertyDefinition(
- NULL, in_class, is_static, &has_seen_constructor, CHECK_OK);
+ &checker, in_class, has_extends, is_static, &is_computed_name,
+ &has_seen_constructor, CHECK_OK);
if (has_seen_constructor && constructor == NULL) {
- constructor = GetPropertyValue(property);
+ constructor = GetPropertyValue(property)->AsFunctionLiteral();
+ DCHECK_NOT_NULL(constructor);
} else {
properties->Add(property, zone());
}
@@ -4153,9 +4203,7 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
bool Parser::TargetStackContainsLabel(const AstRawString* label) {
for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- BreakableStatement* stat = t->node()->AsBreakableStatement();
- if (stat != NULL && ContainsLabel(stat->labels(), label))
- return true;
+ if (ContainsLabel(t->statement()->labels(), label)) return true;
}
return false;
}
@@ -4165,11 +4213,9 @@ BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
bool* ok) {
bool anonymous = label == NULL;
for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- BreakableStatement* stat = t->node()->AsBreakableStatement();
- if (stat == NULL) continue;
+ BreakableStatement* stat = t->statement();
if ((anonymous && stat->is_target_for_anonymous()) ||
(!anonymous && ContainsLabel(stat->labels(), label))) {
- RegisterTargetUse(stat->break_target(), t->previous());
return stat;
}
}
@@ -4181,12 +4227,11 @@ IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
bool* ok) {
bool anonymous = label == NULL;
for (Target* t = target_stack_; t != NULL; t = t->previous()) {
- IterationStatement* stat = t->node()->AsIterationStatement();
+ IterationStatement* stat = t->statement()->AsIterationStatement();
if (stat == NULL) continue;
DCHECK(stat->is_target_for_anonymous());
if (anonymous || ContainsLabel(stat->labels(), label)) {
- RegisterTargetUse(stat->continue_target(), t->previous());
return stat;
}
}
@@ -4194,36 +4239,26 @@ IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
}
-void Parser::RegisterTargetUse(Label* target, Target* stop) {
- // Register that a break target found at the given stop in the
- // target stack has been used from the top of the target stack. Add
- // the break target to any TargetCollectors passed on the stack.
- for (Target* t = target_stack_; t != stop; t = t->previous()) {
- TargetCollector* collector = t->node()->AsTargetCollector();
- if (collector != NULL) collector->AddTarget(target, zone());
- }
-}
-
-
-void Parser::HandleSourceURLComments() {
+void Parser::HandleSourceURLComments(CompilationInfo* info) {
if (scanner_.source_url()->length() > 0) {
- Handle<String> source_url = scanner_.source_url()->Internalize(isolate());
- info_->script()->set_source_url(*source_url);
+ Handle<String> source_url =
+ scanner_.source_url()->Internalize(info->isolate());
+ info->script()->set_source_url(*source_url);
}
if (scanner_.source_mapping_url()->length() > 0) {
Handle<String> source_mapping_url =
- scanner_.source_mapping_url()->Internalize(isolate());
- info_->script()->set_source_mapping_url(*source_mapping_url);
+ scanner_.source_mapping_url()->Internalize(info->isolate());
+ info->script()->set_source_mapping_url(*source_mapping_url);
}
}
-void Parser::ThrowPendingError() {
+void Parser::ThrowPendingError(Isolate* isolate, Handle<Script> script) {
DCHECK(ast_value_factory()->IsInternalized());
if (has_pending_error_) {
- MessageLocation location(script(), pending_error_location_.beg_pos,
+ MessageLocation location(script, pending_error_location_.beg_pos,
pending_error_location_.end_pos);
- Factory* factory = isolate()->factory();
+ Factory* factory = isolate->factory();
bool has_arg =
pending_error_arg_ != NULL || pending_error_char_arg_ != NULL;
Handle<FixedArray> elements = factory->NewFixedArray(has_arg ? 1 : 0);
@@ -4236,7 +4271,7 @@ void Parser::ThrowPendingError() {
.ToHandleChecked();
elements->set(0, *arg_string);
}
- isolate()->debug()->OnCompileError(script());
+ isolate->debug()->OnCompileError(script);
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
Handle<Object> error;
@@ -4244,21 +4279,39 @@ void Parser::ThrowPendingError() {
pending_error_is_reference_error_
? factory->NewReferenceError(pending_error_message_, array)
: factory->NewSyntaxError(pending_error_message_, array);
- if (maybe_error.ToHandle(&error)) isolate()->Throw(*error, &location);
+
+ if (maybe_error.ToHandle(&error)) {
+ Handle<JSObject> jserror = Handle<JSObject>::cast(error);
+
+ Handle<Name> key_start_pos = factory->error_start_pos_symbol();
+ JSObject::SetProperty(jserror, key_start_pos,
+ handle(Smi::FromInt(location.start_pos()), isolate),
+ SLOPPY).Check();
+
+ Handle<Name> key_end_pos = factory->error_end_pos_symbol();
+ JSObject::SetProperty(jserror, key_end_pos,
+ handle(Smi::FromInt(location.end_pos()), isolate),
+ SLOPPY).Check();
+
+ Handle<Name> key_script = factory->error_script_symbol();
+ JSObject::SetProperty(jserror, key_script, script, SLOPPY).Check();
+
+ isolate->Throw(*error, &location);
+ }
}
}
-void Parser::Internalize() {
+void Parser::Internalize(CompilationInfo* info) {
// Internalize strings.
- ast_value_factory()->Internalize(isolate());
+ ast_value_factory()->Internalize(info->isolate());
// Error processing.
- if (info()->function() == NULL) {
+ if (info->function() == NULL) {
if (stack_overflow()) {
- isolate()->StackOverflow();
+ info->isolate()->StackOverflow();
} else {
- ThrowPendingError();
+ ThrowPendingError(info->isolate(), info->script());
}
}
@@ -4266,10 +4319,10 @@ void Parser::Internalize() {
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
for (int i = 0; i < use_counts_[feature]; ++i) {
- isolate()->CountUsage(v8::Isolate::UseCounterFeature(feature));
+ info->isolate()->CountUsage(v8::Isolate::UseCounterFeature(feature));
}
}
- isolate()->counters()->total_preparse_skipped()->Increment(
+ info->isolate()->counters()->total_preparse_skipped()->Increment(
total_preparse_skipped_);
}
@@ -4278,11 +4331,10 @@ void Parser::Internalize() {
// Regular expressions
-RegExpParser::RegExpParser(FlatStringReader* in,
- Handle<String>* error,
- bool multiline,
+RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
+ bool multiline, bool unicode, Isolate* isolate,
Zone* zone)
- : isolate_(zone->isolate()),
+ : isolate_(isolate),
zone_(zone),
error_(error),
captures_(NULL),
@@ -4292,6 +4344,7 @@ RegExpParser::RegExpParser(FlatStringReader* in,
capture_count_(0),
has_more_(true),
multiline_(multiline),
+ unicode_(unicode),
simple_(false),
contains_anchor_(false),
is_scanned_for_captures_(false),
@@ -4348,6 +4401,13 @@ bool RegExpParser::simple() {
}
+bool RegExpParser::IsSyntaxCharacter(uc32 c) {
+ return c == '^' || c == '$' || c == '\\' || c == '.' || c == '*' ||
+ c == '+' || c == '?' || c == '(' || c == ')' || c == '[' || c == ']' ||
+ c == '{' || c == '}' || c == '|';
+}
+
+
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
*error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
@@ -4564,9 +4624,15 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
uc32 first_digit = Next();
if (first_digit == '8' || first_digit == '9') {
- // Treat as identity escape
- builder->AddCharacter(first_digit);
- Advance(2);
+ // If the 'u' flag is present, only syntax characters can be escaped,
+ // no other identity escapes are allowed. If the 'u' flag is not
+ // present, all identity escapes are allowed.
+ if (!FLAG_harmony_unicode_regexps || !unicode_) {
+ builder->AddCharacter(first_digit);
+ Advance(2);
+ } else {
+ return ReportError(CStrVector("Invalid escape"));
+ }
break;
}
}
@@ -4622,25 +4688,41 @@ RegExpTree* RegExpParser::ParseDisjunction() {
uc32 value;
if (ParseHexEscape(2, &value)) {
builder->AddCharacter(value);
- } else {
+ } else if (!FLAG_harmony_unicode_regexps || !unicode_) {
builder->AddCharacter('x');
+ } else {
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ return ReportError(CStrVector("Invalid escape"));
}
break;
}
case 'u': {
Advance(2);
uc32 value;
- if (ParseHexEscape(4, &value)) {
+ if (ParseUnicodeEscape(&value)) {
builder->AddCharacter(value);
- } else {
+ } else if (!FLAG_harmony_unicode_regexps || !unicode_) {
builder->AddCharacter('u');
+ } else {
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ return ReportError(CStrVector("Invalid unicode escape"));
}
break;
}
default:
- // Identity escape.
- builder->AddCharacter(Next());
- Advance(2);
+ Advance();
+ // If the 'u' flag is present, only syntax characters can be escaped, no
+ // other identity escapes are allowed. If the 'u' flag is not present,
+ // all identity escapes are allowed.
+ if (!FLAG_harmony_unicode_regexps || !unicode_ ||
+ IsSyntaxCharacter(current())) {
+ builder->AddCharacter(current());
+ Advance();
+ } else {
+ return ReportError(CStrVector("Invalid escape"));
+ }
break;
}
break;
@@ -4883,11 +4965,10 @@ uc32 RegExpParser::ParseOctalLiteral() {
}
-bool RegExpParser::ParseHexEscape(int length, uc32 *value) {
+bool RegExpParser::ParseHexEscape(int length, uc32* value) {
int start = position();
uc32 val = 0;
- bool done = false;
- for (int i = 0; !done; i++) {
+ for (int i = 0; i < length; ++i) {
uc32 c = current();
int d = HexValue(c);
if (d < 0) {
@@ -4896,15 +4977,52 @@ bool RegExpParser::ParseHexEscape(int length, uc32 *value) {
}
val = val * 16 + d;
Advance();
- if (i == length - 1) {
- done = true;
- }
}
*value = val;
return true;
}
+bool RegExpParser::ParseUnicodeEscape(uc32* value) {
+ // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
+ // allowed). In the latter case, the number of hex digits between { } is
+ // arbitrary. \ and u have already been read.
+ if (current() == '{' && FLAG_harmony_unicode_regexps && unicode_) {
+ int start = position();
+ Advance();
+ if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
+ if (current() == '}') {
+ Advance();
+ return true;
+ }
+ }
+ Reset(start);
+ return false;
+ }
+ // \u but no {, or \u{...} escapes not allowed.
+ return ParseHexEscape(4, value);
+}
+
+
+bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
+ uc32 x = 0;
+ int d = HexValue(current());
+ if (d < 0) {
+ return false;
+ }
+ while (d >= 0) {
+ x = x * 16 + d;
+ if (x > max_value) {
+ return false;
+ }
+ Advance();
+ d = HexValue(current());
+ }
+ *value = x;
+ return true;
+}
+
+
uc32 RegExpParser::ParseClassCharacterEscape() {
DCHECK(current() == '\\');
DCHECK(has_next() && !IsSpecialClassEscape(Next()));
@@ -4959,27 +5077,42 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
if (ParseHexEscape(2, &value)) {
return value;
}
- // If \x is not followed by a two-digit hexadecimal, treat it
- // as an identity escape.
- return 'x';
+ if (!FLAG_harmony_unicode_regexps || !unicode_) {
+ // If \x is not followed by a two-digit hexadecimal, treat it
+ // as an identity escape.
+ return 'x';
+ }
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ ReportError(CStrVector("Invalid escape"));
+ return 0;
}
case 'u': {
Advance();
uc32 value;
- if (ParseHexEscape(4, &value)) {
+ if (ParseUnicodeEscape(&value)) {
return value;
}
- // If \u is not followed by a four-digit hexadecimal, treat it
- // as an identity escape.
- return 'u';
+ if (!FLAG_harmony_unicode_regexps || !unicode_) {
+ return 'u';
+ }
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ ReportError(CStrVector("Invalid unicode escape"));
+ return 0;
}
default: {
- // Extended identity escape. We accept any character that hasn't
- // been matched by a more specific case, not just the subset required
- // by the ECMAScript specification.
uc32 result = current();
- Advance();
- return result;
+ // If the 'u' flag is present, only syntax characters can be escaped, no
+ // other identity escapes are allowed. If the 'u' flag is not present, all
+ // identity escapes are allowed.
+ if (!FLAG_harmony_unicode_regexps || !unicode_ ||
+ IsSyntaxCharacter(result)) {
+ Advance();
+ return result;
+ }
+ ReportError(CStrVector("Invalid escape"));
+ return 0;
}
}
return 0;
@@ -5085,12 +5218,11 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
// ----------------------------------------------------------------------------
// The Parser interface.
-bool RegExpParser::ParseRegExp(FlatStringReader* input,
- bool multiline,
- RegExpCompileData* result,
- Zone* zone) {
+bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
+ FlatStringReader* input, bool multiline,
+ bool unicode, RegExpCompileData* result) {
DCHECK(result != NULL);
- RegExpParser parser(input, &result->error, multiline, zone);
+ RegExpParser parser(input, &result->error, multiline, unicode, isolate, zone);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
DCHECK(tree == NULL);
@@ -5108,49 +5240,67 @@ bool RegExpParser::ParseRegExp(FlatStringReader* input,
}
-bool Parser::Parse() {
- DCHECK(info()->function() == NULL);
+bool Parser::ParseStatic(CompilationInfo* info, bool allow_lazy) {
+ Parser parser(info, info->isolate()->stack_guard()->real_climit(),
+ info->isolate()->heap()->HashSeed(),
+ info->isolate()->unicode_cache());
+ parser.set_allow_lazy(allow_lazy);
+ if (parser.Parse(info)) {
+ info->SetLanguageMode(info->function()->language_mode());
+ return true;
+ }
+ return false;
+}
+
+
+bool Parser::Parse(CompilationInfo* info) {
+ DCHECK(info->function() == NULL);
FunctionLiteral* result = NULL;
- pre_parse_timer_ = isolate()->counters()->pre_parse();
+ // Ok to use Isolate here; this function is only called in the main thread.
+ DCHECK(parsing_on_main_thread_);
+ Isolate* isolate = info->isolate();
+ pre_parse_timer_ = isolate->counters()->pre_parse();
if (FLAG_trace_parse || allow_natives() || extension_ != NULL) {
// If intrinsics are allowed, the Parser cannot operate independent of the
// V8 heap because of Runtime. Tell the string table to internalize strings
// and values right after they're created.
- ast_value_factory()->Internalize(isolate());
+ ast_value_factory()->Internalize(isolate);
}
- if (info()->is_lazy()) {
- DCHECK(!info()->is_eval());
- if (info()->shared_info()->is_function()) {
- result = ParseLazy();
+ if (info->is_lazy()) {
+ DCHECK(!info->is_eval());
+ if (info->shared_info()->is_function()) {
+ result = ParseLazy(info);
} else {
- result = ParseProgram();
+ result = ParseProgram(info);
}
} else {
- SetCachedData();
- result = ParseProgram();
+ SetCachedData(info);
+ result = ParseProgram(info);
}
- info()->SetFunction(result);
+ info->SetFunction(result);
- Internalize();
+ Internalize(info);
DCHECK(ast_value_factory()->IsInternalized());
return (result != NULL);
}
-void Parser::ParseOnBackground() {
- DCHECK(info()->function() == NULL);
+void Parser::ParseOnBackground(CompilationInfo* info) {
+ parsing_on_main_thread_ = false;
+
+ DCHECK(info->function() == NULL);
FunctionLiteral* result = NULL;
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
CompleteParserRecorder recorder;
if (produce_cached_parse_data()) log_ = &recorder;
- DCHECK(info()->source_stream() != NULL);
- ExternalStreamingStream stream(info()->source_stream(),
- info()->source_stream_encoding());
+ DCHECK(info->source_stream() != NULL);
+ ExternalStreamingStream stream(info->source_stream(),
+ info->source_stream_encoding());
scanner_.Initialize(&stream);
- DCHECK(info()->context().is_null() || info()->context()->IsNativeContext());
+ DCHECK(info->context().is_null() || info->context()->IsNativeContext());
// When streaming, we don't know the length of the source until we have parsed
// it. The raw data can be UTF-8, so we wouldn't know the source length until
@@ -5160,20 +5310,20 @@ void Parser::ParseOnBackground() {
// scopes) and set their end position after we know the script length.
Scope* top_scope = NULL;
Scope* eval_scope = NULL;
- result = DoParseProgram(info(), &top_scope, &eval_scope);
+ result = DoParseProgram(info, &top_scope, &eval_scope);
top_scope->set_end_position(scanner()->location().end_pos);
if (eval_scope != NULL) {
eval_scope->set_end_position(scanner()->location().end_pos);
}
- info()->SetFunction(result);
+ info->SetFunction(result);
// We cannot internalize on a background thread; a foreground task will take
// care of calling Parser::Internalize just before compilation.
if (produce_cached_parse_data()) {
- if (result != NULL) *info_->cached_data() = recorder.GetScriptData();
+ if (result != NULL) *info->cached_data() = recorder.GetScriptData();
log_ = NULL;
}
}
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 219f1c4b21..713133a679 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -31,7 +31,8 @@ class FunctionEntry BASE_EMBEDDED {
kEndPositionIndex,
kLiteralCountIndex,
kPropertyCountIndex,
- kStrictModeIndex,
+ kLanguageModeIndex,
+ kUsesSuperPropertyIndex,
kSize
};
@@ -44,11 +45,11 @@ class FunctionEntry BASE_EMBEDDED {
int end_pos() { return backing_[kEndPositionIndex]; }
int literal_count() { return backing_[kLiteralCountIndex]; }
int property_count() { return backing_[kPropertyCountIndex]; }
- StrictMode strict_mode() {
- DCHECK(backing_[kStrictModeIndex] == SLOPPY ||
- backing_[kStrictModeIndex] == STRICT);
- return static_cast<StrictMode>(backing_[kStrictModeIndex]);
+ LanguageMode language_mode() {
+ DCHECK(is_valid_language_mode(backing_[kLanguageModeIndex]));
+ return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
}
+ bool uses_super_property() { return backing_[kUsesSuperPropertyIndex]; }
bool is_valid() { return !backing_.is_empty(); }
@@ -222,15 +223,12 @@ class RegExpBuilder: public ZoneObject {
class RegExpParser BASE_EMBEDDED {
public:
- RegExpParser(FlatStringReader* in,
- Handle<String>* error,
- bool multiline_mode,
- Zone* zone);
+ RegExpParser(FlatStringReader* in, Handle<String>* error, bool multiline_mode,
+ bool unicode, Isolate* isolate, Zone* zone);
- static bool ParseRegExp(FlatStringReader* input,
- bool multiline,
- RegExpCompileData* result,
- Zone* zone);
+ static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
+ bool multiline, bool unicode,
+ RegExpCompileData* result);
RegExpTree* ParsePattern();
RegExpTree* ParseDisjunction();
@@ -248,6 +246,8 @@ class RegExpParser BASE_EMBEDDED {
// Checks whether the following is a length-digit hexadecimal number,
// and sets the value if it is.
bool ParseHexEscape(int length, uc32* value);
+ bool ParseUnicodeEscape(uc32* value);
+ bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
uc32 ParseOctalLiteral();
@@ -272,6 +272,8 @@ class RegExpParser BASE_EMBEDDED {
int position() { return next_pos_ - 1; }
bool failed() { return failed_; }
+ static bool IsSyntaxCharacter(uc32 c);
+
static const int kMaxCaptures = 1 << 16;
static const uc32 kEndMarker = (1 << 21);
@@ -338,6 +340,7 @@ class RegExpParser BASE_EMBEDDED {
int capture_count_;
bool has_more_;
bool multiline_;
+ bool unicode_;
bool simple_;
bool contains_anchor_;
bool is_scanned_for_captures_;
@@ -357,16 +360,9 @@ class ParserTraits {
// it needs.
typedef v8::internal::Parser* Parser;
- // Used by FunctionState and BlockState.
- typedef v8::internal::Scope Scope;
- typedef v8::internal::Scope* ScopePtr;
- inline static Scope* ptr_to_scope(ScopePtr scope) { return scope; }
-
typedef Variable GeneratorVariable;
- typedef v8::internal::Zone Zone;
typedef v8::internal::AstProperties AstProperties;
- typedef Vector<VariableProxy*> ParameterIdentifierVector;
// Return types for traversing functions.
typedef const AstRawString* Identifier;
@@ -387,6 +383,8 @@ class ParserTraits {
explicit ParserTraits(Parser* parser) : parser_(parser) {}
// Helper functions for recursive descent.
+ bool IsEval(const AstRawString* identifier) const;
+ bool IsArguments(const AstRawString* identifier) const;
bool IsEvalOrArguments(const AstRawString* identifier) const;
V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const;
@@ -412,11 +410,6 @@ class ParserTraits {
return string->AsArrayIndex(index);
}
- bool IsConstructorProperty(ObjectLiteral::Property* property) {
- return property->key()->raw_value()->EqualsString(
- ast_value_factory()->constructor_string());
- }
-
static Expression* GetPropertyValue(ObjectLiteral::Property* property) {
return property->value();
}
@@ -426,7 +419,9 @@ class ParserTraits {
static void PushLiteralName(FuncNameInferrer* fni, const AstRawString* id) {
fni->PushLiteralName(id);
}
+
void PushPropertyName(FuncNameInferrer* fni, Expression* expression);
+
static void InferFunctionName(FuncNameInferrer* fni,
FunctionLiteral* func_to_infer) {
fni->AddFunction(func_to_infer);
@@ -564,13 +559,13 @@ class ParserTraits {
ZoneList<v8::internal::Statement*>* NewStatementList(int size, Zone* zone) {
return new(zone) ZoneList<v8::internal::Statement*>(size, zone);
}
- V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type);
+ V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type,
+ FunctionKind kind = kNormalFunction);
// Utility functions
int DeclareArrowParametersFromExpression(Expression* expression, Scope* scope,
Scanner::Location* dupe_loc,
bool* ok);
- V8_INLINE AstValueFactory* ast_value_factory();
// Temporary glue; these functions will move to ParserBase.
Expression* ParseV8Intrinsic(bool* ok);
@@ -584,7 +579,7 @@ class ParserTraits {
int* expected_property_count, bool* ok);
V8_INLINE ZoneList<Statement*>* ParseEagerFunctionBody(
const AstRawString* name, int pos, Variable* fvar,
- Token::Value fvar_init_op, bool is_generator, bool* ok);
+ Token::Value fvar_init_op, FunctionKind kind, bool* ok);
ClassLiteral* ParseClassLiteral(const AstRawString* name,
Scanner::Location class_name_location,
@@ -643,16 +638,8 @@ class ParserTraits {
class Parser : public ParserBase<ParserTraits> {
public:
- // Note that the hash seed in ParseInfo must be the hash seed from the
- // Isolate's heap, otherwise the heap will be in an inconsistent state once
- // the strings created by the Parser are internalized.
- struct ParseInfo {
- uintptr_t stack_limit;
- uint32_t hash_seed;
- UnicodeCache* unicode_cache;
- };
-
- Parser(CompilationInfo* info, ParseInfo* parse_info);
+ Parser(CompilationInfo* info, uintptr_t stack_limit, uint32_t hash_seed,
+ UnicodeCache* unicode_cache);
~Parser() {
delete reusable_preparser_;
reusable_preparser_ = NULL;
@@ -663,26 +650,14 @@ class Parser : public ParserBase<ParserTraits> {
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
- static bool Parse(CompilationInfo* info,
- bool allow_lazy = false) {
- ParseInfo parse_info = {info->isolate()->stack_guard()->real_climit(),
- info->isolate()->heap()->HashSeed(),
- info->isolate()->unicode_cache()};
- Parser parser(info, &parse_info);
- parser.set_allow_lazy(allow_lazy);
- if (parser.Parse()) {
- info->SetStrictMode(info->function()->strict_mode());
- return true;
- }
- return false;
- }
- bool Parse();
- void ParseOnBackground();
+ static bool ParseStatic(CompilationInfo* info, bool allow_lazy = false);
+ bool Parse(CompilationInfo* info);
+ void ParseOnBackground(CompilationInfo* info);
// Handle errors detected during parsing, move statistics to Isolate,
// internalize strings (move them to the heap).
- void Internalize();
- void HandleSourceURLComments();
+ void Internalize(CompilationInfo* info);
+ void HandleSourceURLComments(CompilationInfo* info);
private:
friend class ParserTraits;
@@ -696,48 +671,29 @@ class Parser : public ParserBase<ParserTraits> {
// https://codereview.chromium.org/7003030/ ).
static const int kMaxNumFunctionLocals = 4194303; // 2^22-1
- enum VariableDeclarationContext {
- kModuleElement,
- kBlockElement,
- kStatement,
- kForStatement
- };
-
- // If a list of variable declarations includes any initializers.
- enum VariableDeclarationProperties {
- kHasInitializers,
- kHasNoInitializers
- };
-
// Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram();
-
- FunctionLiteral* ParseLazy();
- FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
+ FunctionLiteral* ParseProgram(CompilationInfo* info);
- Isolate* isolate() { return info_->isolate(); }
- CompilationInfo* info() const { return info_; }
- Handle<Script> script() const { return info_->script(); }
- AstValueFactory* ast_value_factory() const {
- return info_->ast_value_factory();
- }
+ FunctionLiteral* ParseLazy(CompilationInfo* info);
+ FunctionLiteral* ParseLazy(CompilationInfo* info,
+ Utf16CharacterStream* source);
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(CompilationInfo* info, Scope** scope,
Scope** ad_hoc_eval_scope);
- void SetCachedData();
+ void SetCachedData(CompilationInfo* info);
bool inside_with() const { return scope_->inside_with(); }
ScriptCompiler::CompileOptions compile_options() const {
- return info_->compile_options();
+ return compile_options_;
}
bool consume_cached_parse_data() const {
- return compile_options() == ScriptCompiler::kConsumeParserCache &&
+ return compile_options_ == ScriptCompiler::kConsumeParserCache &&
cached_parse_data_ != NULL;
}
bool produce_cached_parse_data() const {
- return compile_options() == ScriptCompiler::kProduceParserCache;
+ return compile_options_ == ScriptCompiler::kProduceParserCache;
}
Scope* DeclarationScope(VariableMode mode) {
return IsLexicalVariableMode(mode)
@@ -748,23 +704,20 @@ class Parser : public ParserBase<ParserTraits> {
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
- void* ParseSourceElements(ZoneList<Statement*>* processor, int end_token,
- bool is_eval, bool is_global,
- Scope** ad_hoc_eval_scope, bool* ok);
- Statement* ParseModuleElement(ZoneList<const AstRawString*>* labels,
- bool* ok);
- Statement* ParseModuleDeclaration(ZoneList<const AstRawString*>* names,
- bool* ok);
- Module* ParseModule(bool* ok);
- Module* ParseModuleLiteral(bool* ok);
- Module* ParseModulePath(bool* ok);
- Module* ParseModuleVariable(bool* ok);
- Module* ParseModuleUrl(bool* ok);
- Module* ParseModuleSpecifier(bool* ok);
- Block* ParseImportDeclaration(bool* ok);
+ void* ParseStatementList(ZoneList<Statement*>* body, int end_token,
+ bool is_eval, Scope** ad_hoc_eval_scope, bool* ok);
+ Statement* ParseStatementListItem(bool* ok);
+ Statement* ParseModule(bool* ok);
+ Statement* ParseModuleItem(bool* ok);
+ Literal* ParseModuleSpecifier(bool* ok);
+ Statement* ParseImportDeclaration(bool* ok);
Statement* ParseExportDeclaration(bool* ok);
- Statement* ParseBlockElement(ZoneList<const AstRawString*>* labels, bool* ok);
+ Statement* ParseExportDefault(bool* ok);
+ void* ParseExportClause(ZoneList<const AstRawString*>* names,
+ Scanner::Location* reserved_loc, bool* ok);
+ void* ParseNamedImports(ZoneList<const AstRawString*>* names, bool* ok);
Statement* ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+ Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels, bool* ok);
Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
bool* ok);
Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
@@ -830,8 +783,6 @@ class Parser : public ParserBase<ParserTraits> {
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
-
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
@@ -847,21 +798,16 @@ class Parser : public ParserBase<ParserTraits> {
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
// Parser support
- VariableProxy* NewUnresolved(const AstRawString* name,
- VariableMode mode,
- Interface* interface);
+ VariableProxy* NewUnresolved(const AstRawString* name, VariableMode mode);
void Declare(Declaration* declaration, bool resolve, bool* ok);
bool TargetStackContainsLabel(const AstRawString* label);
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok);
- void RegisterTargetUse(Label* target, Target* stop);
+ void AddAssertIsConstruct(ZoneList<Statement*>* body, int pos);
// Factory methods.
-
- Scope* NewScope(Scope* parent, ScopeType type);
-
FunctionLiteral* DefaultConstructor(bool call_super, Scope* scope, int pos,
int end_pos);
@@ -878,9 +824,9 @@ class Parser : public ParserBase<ParserTraits> {
// Consumes the ending }.
ZoneList<Statement*>* ParseEagerFunctionBody(
const AstRawString* function_name, int pos, Variable* fvar,
- Token::Value fvar_init_op, bool is_generator, bool* ok);
+ Token::Value fvar_init_op, FunctionKind kind, bool* ok);
- void ThrowPendingError();
+ void ThrowPendingError(Isolate* isolate, Handle<Script> script);
TemplateLiteralState OpenTemplateLiteral(int pos);
void AddTemplateSpan(TemplateLiteralState* state, bool tail);
@@ -894,9 +840,10 @@ class Parser : public ParserBase<ParserTraits> {
PreParser* reusable_preparser_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
Target* target_stack_; // for break, continue statements
+ ScriptCompiler::CompileOptions compile_options_;
ParseData* cached_parse_data_;
- CompilationInfo* info_;
+ bool parsing_lazy_arrow_parameters_; // for lazily parsed arrow functions.
// Pending errors.
bool has_pending_error_;
@@ -911,6 +858,8 @@ class Parser : public ParserBase<ParserTraits> {
int use_counts_[v8::Isolate::kUseCounterFeatureCount];
int total_preparse_skipped_;
HistogramTimer* pre_parse_timer_;
+
+ bool parsing_on_main_thread_;
};
@@ -920,8 +869,9 @@ bool ParserTraits::IsFutureStrictReserved(
}
-Scope* ParserTraits::NewScope(Scope* parent_scope, ScopeType scope_type) {
- return parser_->NewScope(parent_scope, scope_type);
+Scope* ParserTraits::NewScope(Scope* parent_scope, ScopeType scope_type,
+ FunctionKind kind) {
+ return parser_->NewScope(parent_scope, scope_type, kind);
}
@@ -941,9 +891,9 @@ void ParserTraits::SkipLazyFunctionBody(const AstRawString* function_name,
ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody(
const AstRawString* name, int pos, Variable* fvar,
- Token::Value fvar_init_op, bool is_generator, bool* ok) {
- return parser_->ParseEagerFunctionBody(name, pos, fvar, fvar_init_op,
- is_generator, ok);
+ Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
+ return parser_->ParseEagerFunctionBody(name, pos, fvar, fvar_init_op, kind,
+ ok);
}
void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
@@ -952,11 +902,6 @@ void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
}
-AstValueFactory* ParserTraits::ast_value_factory() {
- return parser_->ast_value_factory();
-}
-
-
// Support for handling complex values (array and object literals) that
// can be fully handled at compile time.
class CompileTimeValue: public AllStatic {
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 3f30e38467..819fe4eef4 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -57,7 +57,7 @@ PerfJitLogger::PerfJitLogger() : perf_output_handle_(NULL), code_index_(0) {
CHECK_NE(size, -1);
perf_output_handle_ =
base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
- CHECK_NE(perf_output_handle_, NULL);
+ CHECK_NOT_NULL(perf_output_handle_);
setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
LogWriteHeader();
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 4b8b165657..8bb45e36cc 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -77,14 +77,15 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
cpu.part() == base::CPU::PPC_POWER8) {
supported_ |= (1u << LWSYNC);
}
+ if (cpu.part() == base::CPU::PPC_POWER7 ||
+ cpu.part() == base::CPU::PPC_POWER8) {
+ supported_ |= (1u << ISELECT);
+ }
#if V8_OS_LINUX
if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
// Assume support
supported_ |= (1u << FPU);
}
- if (cpu.cache_line_size() != 0) {
- cache_line_size_ = cpu.cache_line_size();
- }
#elif V8_OS_AIX
// Assume support FP support and default cache line size
supported_ |= (1u << FPU);
@@ -92,6 +93,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#else // Simulator
supported_ |= (1u << FPU);
supported_ |= (1u << LWSYNC);
+ supported_ |= (1u << ISELECT);
#if V8_TARGET_ARCH_PPC64
supported_ |= (1u << FPR_GPR_MOV);
#endif
@@ -220,9 +222,6 @@ MemOperand::MemOperand(Register ra, Register rb) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-// Spare buffer.
-static const int kMinimalBufferSize = 4 * KB;
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
@@ -249,6 +248,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
+ reloc_info_writer.Finish();
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -425,7 +425,6 @@ int Assembler::target_at(int pos) {
}
}
- PPCPORT_UNIMPLEMENTED();
DCHECK(false);
return -1;
}
@@ -438,17 +437,27 @@ void Assembler::target_at_put(int pos, int target_pos) {
// check which type of branch this is 16 or 26 bit offset
if (BX == opcode) {
int imm26 = target_pos - pos;
- DCHECK((imm26 & (kAAMask | kLKMask)) == 0);
- instr &= ((~kImm26Mask) | kAAMask | kLKMask);
- DCHECK(is_int26(imm26));
- instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
+ if (imm26 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori, 0,0,0
+ } else {
+ instr &= ((~kImm26Mask) | kAAMask | kLKMask);
+ instr |= (imm26 & kImm26Mask);
+ }
+ instr_at_put(pos, instr);
return;
} else if (BCX == opcode) {
int imm16 = target_pos - pos;
- DCHECK((imm16 & (kAAMask | kLKMask)) == 0);
- instr &= ((~kImm16Mask) | kAAMask | kLKMask);
- DCHECK(is_int16(imm16));
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
+ if (imm16 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori, 0,0,0
+ } else {
+ instr &= ((~kImm16Mask) | kAAMask | kLKMask);
+ instr |= (imm16 & kImm16Mask);
+ }
+ instr_at_put(pos, instr);
return;
} else if ((instr & ~kImm26Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
@@ -861,9 +870,14 @@ void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
// Multiply hi word
-void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o,
- RCBit r) {
- xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
+void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
+ xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
+}
+
+
+// Multiply hi word unsigned
+void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
+ xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
@@ -874,6 +888,13 @@ void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
}
+// Divide word unsigned
+void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
+}
+
+
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use li instead to show intent
d_form(ADDI, dst, src, imm.imm_, true);
@@ -926,6 +947,11 @@ void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
}
+void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
+ x_form(EXT2 | ORC, dst, src1, src2, rc);
+}
+
+
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
@@ -1014,6 +1040,12 @@ void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
}
+void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
+ emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ cb * B6);
+}
+
+
// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
d_form(ADDI, dst, r0, imm.imm_, true);
@@ -1080,6 +1112,14 @@ void Assembler::lhzux(Register rt, const MemOperand& src) {
}
+void Assembler::lhax(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
void Assembler::lwz(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(LWZ, dst, src.ra(), src.offset(), true);
@@ -1110,6 +1150,12 @@ void Assembler::lwzux(Register rt, const MemOperand& src) {
}
+void Assembler::lha(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(LHA, dst, src.ra(), src.offset(), true);
+}
+
+
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
int offset = src.offset();
@@ -1123,6 +1169,18 @@ void Assembler::lwa(Register dst, const MemOperand& src) {
}
+void Assembler::lwax(Register rt, const MemOperand& src) {
+#if V8_TARGET_ARCH_PPC64
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+#else
+ lwzx(rt, src);
+#endif
+}
+
+
void Assembler::stb(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(STB, dst, src.ra(), src.offset(), true);
@@ -1211,6 +1269,16 @@ void Assembler::extsh(Register rs, Register ra, RCBit rc) {
}
+void Assembler::extsw(Register rs, Register ra, RCBit rc) {
+#if V8_TARGET_ARCH_PPC64
+ emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
+#else
+ // nop on 32-bit
+ DCHECK(rs.is(ra) && rc == LeaveRC);
+#endif
+}
+
+
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
@@ -1386,11 +1454,6 @@ void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
}
-void Assembler::extsw(Register rs, Register ra, RCBit rc) {
- emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
-}
-
-
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
@@ -1401,32 +1464,26 @@ void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
-#endif
-void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
- DCHECK(fopcode < fLastFaker);
- emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
-}
-
-
-void Assembler::marker_asm(int mcode) {
- if (::v8::internal::FLAG_trace_sim_stubs) {
- DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER);
- emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode);
- }
+void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
+#endif
// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
+#if ABI_USES_FUNCTION_DESCRIPTORS
DCHECK(pc_offset() == 0);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
emit_ptr(0);
emit_ptr(0);
+#endif
}
@@ -1545,8 +1602,8 @@ void Assembler::mov(Register dst, const Operand& src) {
RecordRelocInfo(rinfo);
}
- canOptimize =
- !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
+ canOptimize = !(src.must_output_reloc_info(this) ||
+ (is_trampoline_pool_blocked() && !is_int16(value)));
#if V8_OOL_CONSTANT_POOL
if (use_constant_pool_for_mov(src, canOptimize)) {
@@ -1742,21 +1799,6 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code,
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
-void Assembler::info(const char* msg, Condition cond, int32_t code,
- CRegister cr) {
- if (::v8::internal::FLAG_trace_sim_stubs) {
- emit(0x7d9ff808);
-#if V8_TARGET_ARCH_PPC64
- uint64_t value = reinterpret_cast<uint64_t>(msg);
- emit(static_cast<uint32_t>(value >> 32));
- emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
-#else
- emit(reinterpret_cast<Instr>(msg));
-#endif
- }
-}
-
-
void Assembler::dcbf(Register ra, Register rb) {
emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}
@@ -1984,8 +2026,27 @@ void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
}
-void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) {
- emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11);
+void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}
@@ -2112,28 +2173,6 @@ bool Assembler::IsNop(Instr instr, int type) {
// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 2b112d6ca5..a3949556f3 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -54,11 +54,8 @@
#define ABI_PASSES_HANDLES_IN_REGS \
(!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
-#define ABI_RETURNS_HANDLES_IN_REGS \
- (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
-
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
- (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
+ (!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
#define ABI_TOC_ADDRESSABILITY_VIA_IP \
(V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
@@ -832,6 +829,48 @@ class Assembler : public AssemblerBase {
}
}
+ void isel(Register rt, Register ra, Register rb, int cb);
+ void isel(Condition cond, Register rt, Register ra, Register rb,
+ CRegister cr = cr7) {
+ DCHECK(cond != al);
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+
+ switch (cond) {
+ case eq:
+ isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
+ break;
+ case ne:
+ isel(rt, rb, ra, encode_crbit(cr, CR_EQ));
+ break;
+ case gt:
+ isel(rt, ra, rb, encode_crbit(cr, CR_GT));
+ break;
+ case le:
+ isel(rt, rb, ra, encode_crbit(cr, CR_GT));
+ break;
+ case lt:
+ isel(rt, ra, rb, encode_crbit(cr, CR_LT));
+ break;
+ case ge:
+ isel(rt, rb, ra, encode_crbit(cr, CR_LT));
+ break;
+ case unordered:
+ isel(rt, ra, rb, encode_crbit(cr, CR_FU));
+ break;
+ case ordered:
+ isel(rt, rb, ra, encode_crbit(cr, CR_FU));
+ break;
+ case overflow:
+ isel(rt, ra, rb, encode_crbit(cr, CR_SO));
+ break;
+ case nooverflow:
+ isel(rt, rb, ra, encode_crbit(cr, CR_SO));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
if (cond == al) {
b(L, lk);
@@ -907,11 +946,13 @@ class Assembler : public AssemblerBase {
void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
- void mulhw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
- RCBit r = LeaveRC);
+ void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
+ void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
void addi(Register dst, Register src, const Operand& imm);
void addis(Register dst, Register src, const Operand& imm);
@@ -926,6 +967,7 @@ class Assembler : public AssemblerBase {
void ori(Register dst, Register src, const Operand& imm);
void oris(Register dst, Register src, const Operand& imm);
void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+ void orc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void xori(Register dst, Register src, const Operand& imm);
void xoris(Register ra, Register rs, const Operand& imm);
void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
@@ -943,11 +985,14 @@ class Assembler : public AssemblerBase {
void lhz(Register dst, const MemOperand& src);
void lhzx(Register dst, const MemOperand& src);
void lhzux(Register dst, const MemOperand& src);
+ void lha(Register dst, const MemOperand& src);
+ void lhax(Register dst, const MemOperand& src);
void lwz(Register dst, const MemOperand& src);
void lwzu(Register dst, const MemOperand& src);
void lwzx(Register dst, const MemOperand& src);
void lwzux(Register dst, const MemOperand& src);
void lwa(Register dst, const MemOperand& src);
+ void lwax(Register dst, const MemOperand& src);
void stb(Register dst, const MemOperand& src);
void stbx(Register dst, const MemOperand& src);
void stbux(Register dst, const MemOperand& src);
@@ -961,6 +1006,7 @@ class Assembler : public AssemblerBase {
void extsb(Register rs, Register ra, RCBit r = LeaveRC);
void extsh(Register rs, Register ra, RCBit r = LeaveRC);
+ void extsw(Register rs, Register ra, RCBit r = LeaveRC);
void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
@@ -992,11 +1038,12 @@ class Assembler : public AssemblerBase {
void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
- void extsw(Register rs, Register ra, RCBit r = LeaveRC);
void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
+ void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
#endif
void rlwinm(Register ra, Register rs, int sh, int mb, int me,
@@ -1059,8 +1106,6 @@ class Assembler : public AssemblerBase {
void mtfprwa(DoubleRegister dst, Register src);
#endif
- void fake_asm(enum FAKE_OPCODE_T fopcode);
- void marker_asm(int mcode);
void function_descriptor();
// Exception-generating instructions and debugging support
@@ -1069,10 +1114,6 @@ class Assembler : public AssemblerBase {
void bkpt(uint32_t imm16); // v5 and above
- // Informational messages when simulating
- void info(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode, CRegister cr = cr7);
-
void dcbf(Register ra, Register rb);
void sync();
void lwsync();
@@ -1111,7 +1152,14 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
void fctiw(const DoubleRegister frt, const DoubleRegister frb);
- void frim(const DoubleRegister frt, const DoubleRegister frb);
+ void frin(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void friz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void frip(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void frim(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void frsp(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
@@ -1233,6 +1281,10 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
+
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1366,12 +1418,6 @@ class Assembler : public AssemblerBase {
bool is_trampoline_emitted() const { return trampoline_emitted_; }
-#if V8_OOL_CONSTANT_POOL
- void set_constant_pool_available(bool available) {
- constant_pool_available_ = available;
- }
-#endif
-
private:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index 7817fcd0f6..ca8704f9dd 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -306,6 +306,34 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ // ----------- S t a t e -------------
+ // -- r4: argument for Runtime_NewObject
+ // -----------------------------------
+ Register result = r7;
+
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
+ __ Push(r5, r4, original_constructor);
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ __ mr(result, r3);
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ __ b(count_incremented);
+ } else {
+ __ Push(r4, original_constructor);
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ mr(result, r3);
+ __ b(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -313,6 +341,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r5 : allocation site or undefined
+ // -- r6 : original constructor
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -327,18 +356,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
- __ AssertUndefinedOrAllocationSite(r5, r6);
+ __ AssertUndefinedOrAllocationSite(r5, r7);
__ push(r5);
}
// Preserve the two incoming parameters on the stack.
__ SmiTag(r3);
- __ push(r3); // Smi-tagged arguments count.
- __ push(r4); // Constructor function.
+ __ Push(r3, r4);
+
+ Label rt_call, allocated, normal_new, count_incremented;
+ __ cmp(r4, r6);
+ __ beq(&normal_new);
+
+ // Original constructor and function are different.
+ Generate_Runtime_NewObject(masm, create_memento, r6, &count_incremented,
+ &allocated);
+ __ bind(&normal_new);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
@@ -369,14 +405,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ lwz(r7, bit_field3);
- __ DecodeField<Map::ConstructionCount>(r11, r7);
- STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
- __ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
- __ beq(&allocate);
+ __ DecodeField<Map::Counter>(r11, r7);
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ blt(&allocate);
// Decrease generous allocation count.
- __ Add(r7, r7, -(1 << Map::ConstructionCount::kShift), r0);
+ __ Add(r7, r7, -(1 << Map::Counter::kShift), r0);
__ stw(r7, bit_field3);
- __ cmpi(r11, Operand(JSFunction::kFinishSlackTracking));
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
__ bne(&allocate);
__ push(r4);
@@ -429,9 +464,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
- STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
- __ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
- __ beq(&no_inobject_slack_tracking);
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ blt(&no_inobject_slack_tracking);
// Allocate object with a slack.
__ lbz(r3, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
@@ -568,27 +602,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r4: constructor function
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
- __ push(r5);
- }
-
- __ push(r4); // argument for Runtime_NewObject
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ mr(r7, r3);
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ b(&count_incremented);
- }
+ Generate_Runtime_NewObject(masm, create_memento, r4, &count_incremented,
+ &allocated);
// Receiver for constructor call allocated.
// r7: JSObject
@@ -723,6 +738,74 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- r5 : allocation site or undefined
+ // -- r6 : original constructor
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Smi-tagged arguments count.
+ __ mr(r7, r3);
+ __ SmiTag(r7, SetRC);
+
+ // receiver is the hole.
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ Push(r7, ip);
+
+ // Set up pointer to last argument.
+ __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ // r3: number of arguments
+ // r4: constructor function
+ // r5: address of last argument (caller sp)
+ // r7: number of arguments (smi-tagged)
+ // cr0: compare against zero of arguments
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ Label loop, no_args;
+ __ beq(&no_args, cr0);
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ subi(ip, ip, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r5, ip));
+ __ push(r0);
+ __ bdnz(&loop);
+ __ bind(&no_args);
+
+ // Call the function.
+ // r3: number of arguments
+ // r4: constructor function
+ ParameterCount actual(r3);
+ __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ // r3: result
+ // sp[0]: number of arguments (smi-tagged)
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(r4, MemOperand(sp, 0));
+
+ // Leave construct frame.
+ }
+
+ __ SmiToPtrArrayOffset(r4, r4);
+ __ add(sp, sp, r4);
+ __ addi(sp, sp, Operand(kPointerSize));
+ __ blr();
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 3e84a2143c..0226ffbf57 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -157,7 +157,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- __ TestIfInt32(result_reg, scratch, r0);
+ __ TestIfInt32(result_reg, r0);
#else
__ TestIfInt32(scratch, result_reg, r0);
#endif
@@ -328,15 +328,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// not (it's a NaN). For <= and >= we need to load r0 with the failing
// value if it's a NaN.
if (cond != eq) {
- Label not_equal;
- __ bne(&not_equal);
- // All-zero means Infinity means equal.
- __ Ret();
- __ bind(&not_equal);
- if (cond == le) {
- __ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ li(r4, Operand((cond == le) ? GREATER : LESS));
+ __ isel(eq, r3, r3, r4);
} else {
- __ li(r3, Operand(LESS)); // NaN >= NaN should fail.
+ Label not_equal;
+ __ bne(&not_equal);
+ // All-zero means Infinity means equal.
+ __ Ret();
+ __ bind(&not_equal);
+ if (cond == le) {
+ __ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(r3, Operand(LESS)); // NaN >= NaN should fail.
+ }
}
}
__ Ret();
@@ -571,7 +576,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ and_(r5, lhs, rhs);
__ JumpIfNotSmi(r5, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -592,16 +597,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Label nan, equal, less_than;
__ bunordered(&nan);
- __ beq(&equal);
- __ blt(&less_than);
- __ li(r3, Operand(GREATER));
- __ Ret();
- __ bind(&equal);
- __ li(r3, Operand(EQUAL));
- __ Ret();
- __ bind(&less_than);
- __ li(r3, Operand(LESS));
- __ Ret();
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ DCHECK(EQUAL == 0);
+ __ li(r4, Operand(GREATER));
+ __ li(r5, Operand(LESS));
+ __ isel(eq, r3, r0, r4);
+ __ isel(lt, r3, r5, r3);
+ __ Ret();
+ } else {
+ __ beq(&equal);
+ __ blt(&less_than);
+ __ li(r3, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ li(r3, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ li(r3, Operand(LESS));
+ __ Ret();
+ }
__ bind(&nan);
// If one of the sides was a NaN then the v flag is set. Load r3 with
@@ -862,11 +876,16 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ConvertIntToDouble(scratch2, double_result);
// Get absolute value of exponent.
- Label positive_exponent;
__ cmpi(scratch, Operand::Zero());
- __ bge(&positive_exponent);
- __ neg(scratch, scratch);
- __ bind(&positive_exponent);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ neg(scratch2, scratch);
+ __ isel(lt, scratch, scratch2, scratch);
+ } else {
+ Label positive_exponent;
+ __ bge(&positive_exponent);
+ __ neg(scratch, scratch);
+ __ bind(&positive_exponent);
+ }
Label while_true, no_carry, loop_end;
__ bind(&while_true);
@@ -937,11 +956,11 @@ bool CEntryStub::NeedsImmovableCode() { return true; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- // WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
@@ -1000,7 +1019,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
int arg_stack_space = 1;
// PPC LINUX ABI:
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// Pass buffer for return value on stack if necessary
if (result_size() > 1) {
DCHECK_EQ(2, result_size());
@@ -1020,7 +1039,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Result returned in registers or stack, depending on result size and ABI.
Register isolate_reg = r5;
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
if (result_size() > 1) {
// The return value is 16-byte non-scalar value.
// Use frame storage reserved by calling function to pass return
@@ -1068,7 +1087,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Call(target);
}
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
if (result_size() > 1) {
__ LoadP(r4, MemOperand(r3, kPointerSize));
@@ -1150,9 +1169,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -1501,17 +1518,24 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Push(r3, r4);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
- Label true_value, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&true_value);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ cmpi(r3, Operand::Zero());
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ LoadRoot(r4, Heap::kFalseValueRootIndex);
+ __ isel(eq, r3, r3, r4);
+ } else {
+ Label true_value, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&true_value);
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ b(&done);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ b(&done);
- __ bind(&true_value);
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ bind(&true_value);
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
- __ bind(&done);
+ __ bind(&done);
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
}
}
@@ -1520,9 +1544,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
- r7, &miss);
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r7, r8, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
+ r8, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -1535,10 +1564,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = r6;
+ Register scratch = r8;
Register result = r3;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1675,11 +1710,15 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r4 = parameter count (tagged)
// r5 = argument count (tagged)
// Compute the mapped parameter count = min(r4, r5) in r4.
- Label skip;
__ cmp(r4, r5);
- __ blt(&skip);
- __ mr(r4, r5);
- __ bind(&skip);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(lt, r4, r4, r5);
+ } else {
+ Label skip;
+ __ blt(&skip);
+ __ mr(r4, r5);
+ __ bind(&skip);
+ }
__ bind(&try_allocate);
@@ -1688,15 +1727,21 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- Label skip2, skip3;
__ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
- __ bne(&skip2);
- __ li(r11, Operand::Zero());
- __ b(&skip3);
- __ bind(&skip2);
- __ SmiToPtrArrayOffset(r11, r4);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ bind(&skip3);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ SmiToPtrArrayOffset(r11, r4);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ isel(eq, r11, r0, r11);
+ } else {
+ Label skip2, skip3;
+ __ bne(&skip2);
+ __ li(r11, Operand::Zero());
+ __ b(&skip3);
+ __ bind(&skip2);
+ __ SmiToPtrArrayOffset(r11, r4);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ bind(&skip3);
+ }
// 2. Backing store.
__ SmiToPtrArrayOffset(r7, r5);
@@ -1720,14 +1765,20 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ LoadP(r7,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
- Label skip4, skip5;
__ cmpi(r4, Operand::Zero());
- __ bne(&skip4);
- __ LoadP(r7, MemOperand(r7, kNormalOffset));
- __ b(&skip5);
- __ bind(&skip4);
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ bind(&skip5);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadP(r11, MemOperand(r7, kNormalOffset));
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ isel(eq, r7, r11, r7);
+ } else {
+ Label skip4, skip5;
+ __ bne(&skip4);
+ __ LoadP(r7, MemOperand(r7, kNormalOffset));
+ __ b(&skip5);
+ __ bind(&skip4);
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ bind(&skip5);
+ }
// r3 = address of new object (tagged)
// r4 = mapped parameter count (tagged)
@@ -1764,14 +1815,20 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 = argument count (tagged)
// r7 = address of parameter map or backing store (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map, skip6;
+ Label skip_parameter_map;
__ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
- __ bne(&skip6);
- // Move backing store address to r6, because it is
- // expected there when filling in the unmapped arguments.
- __ mr(r6, r7);
- __ b(&skip_parameter_map);
- __ bind(&skip6);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(eq, r6, r7, r6);
+ __ beq(&skip_parameter_map);
+ } else {
+ Label skip6;
+ __ bne(&skip6);
+ // Move backing store address to r6, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mr(r6, r7);
+ __ b(&skip_parameter_map);
+ __ bind(&skip6);
+ }
__ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex);
__ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
@@ -2684,19 +2741,27 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// entry is at the feedback vector slot given by r6 + 1.
__ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize));
} else {
- Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into r5, or undefined.
__ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
__ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
__ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ isel(eq, r5, r5, r8);
+ } else {
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
}
__ AssertUndefinedOrAllocationSite(r5, r8);
}
+ // Pass function as original constructor.
+ __ mr(r6, r4);
+
// Jump to the function-specific construct stub.
Register jmp_reg = r7;
__ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -2738,12 +2803,11 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id
+ // r5 - vector
Label miss;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, r5);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
__ cmp(r4, r7);
__ bne(&miss);
@@ -2776,21 +2840,44 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id (Smi)
+ // r5 - vector
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, r5);
-
// The checks. First, does r4 match the recorded monomorphic target?
__ SmiToPtrArrayOffset(r7, r6);
__ add(r7, r5, r7);
__ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
- __ cmp(r4, r7);
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset));
+ __ cmp(r4, r8);
__ bne(&extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(r4, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2815,38 +2902,74 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label miss;
+ Label uninitialized, miss;
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
__ beq(&slow_start);
+
+ // The following cases attempt to handle MISS cases without going to the
+ // runtime.
+ if (FLAG_trace_ic) {
+ __ b(&miss);
+ }
+
__ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+ __ beq(&uninitialized);
+
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(r7);
+ __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
+ __ bne(&miss);
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+ __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+ // We have to update statistics for runtime profiling.
+ __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
+ __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+ __ LoadP(r7, FieldMemOperand(r5, generic_offset));
+ __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
+ __ b(&slow_start);
+
+ __ bind(&uninitialized);
+
+ // We are going monomorphic, provided we actually have a JSFunction.
+ __ JumpIfSmi(r4, &miss);
+
+ // Goto miss case if we do not have a function.
+ __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+ __ bne(&miss);
+
+ // Make sure the function is not the Array() function, which requires special
+ // behavior on MISS.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ cmp(r4, r7);
__ beq(&miss);
- if (!FLAG_trace_ic) {
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(r7);
- __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
- __ bne(&miss);
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
- // We have to update statistics for runtime profiling.
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- __ LoadP(r7, FieldMemOperand(r5, generic_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
- __ jmp(&slow_start);
+ // Update stats.
+ __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
+ __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+
+ // Store the function. Use a stub since we need a frame for allocation.
+ // r5 - vector
+ // r6 - slot
+ // r4 - function
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r4);
+ __ CallStub(&create_stub);
+ __ Pop(r4);
}
- // We are here because tracing is on or we are going monomorphic.
+ __ b(&have_js_function);
+
+ // We are here because tracing is on or we encountered a MISS case we can't
+ // handle here.
__ bind(&miss);
GenerateMiss(masm);
@@ -2864,25 +2987,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ LoadP(r7, MemOperand(sp, (arg_count() + 1) * kPointerSize), r0);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push the function and feedback info.
+ __ Push(r4, r5, r6);
- // Push the receiver and the function and feedback info.
- __ Push(r7, r4, r5, r6);
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 4);
-
- // Move result to r4 and exit the internal frame.
- __ mr(r4, r3);
- }
+ // Move result to r4 and exit the internal frame.
+ __ mr(r4, r3);
}
@@ -3270,6 +3388,49 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in r3.
+ Label not_smi;
+ __ JumpIfNotSmi(r3, &not_smi);
+ __ blr();
+ __ bind(&not_smi);
+
+ Label not_heap_number;
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ // r3: object
+ // r4: instance type.
+ __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
+ __ bne(&not_heap_number);
+ __ blr();
+ __ bind(&not_heap_number);
+
+ Label not_string, slow_string;
+ __ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
+ __ bge(&not_string);
+ // Check if string has a cached array index.
+ __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
+ __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
+ __ bne(&slow_string, cr0);
+ __ IndexFromHash(r5, r3);
+ __ blr();
+ __ bind(&slow_string);
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ bind(&not_string);
+
+ Label not_oddball;
+ __ cmpi(r4, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+ __ blr();
+ __ bind(&not_oddball);
+
+ __ push(r3); // Push argument.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3310,15 +3471,20 @@ void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
void StringHelper::GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
- Label skip, result_not_equal, compare_lengths;
+ Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
__ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
Register length_delta = scratch3;
- __ ble(&skip, cr0);
- __ mr(scratch1, scratch2);
- __ bind(&skip);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(gt, scratch1, scratch2, scratch1, cr0);
+ } else {
+ Label skip;
+ __ ble(&skip, cr0);
+ __ mr(scratch1, scratch2);
+ __ bind(&skip);
+ }
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
__ cmpi(min_length, Operand::Zero());
@@ -3337,15 +3503,23 @@ void StringHelper::GenerateCompareFlatOneByteStrings(
__ bind(&result_not_equal);
// Conditionally update the result based either on length_delta or
// the last comparion performed in the loop above.
- Label less_equal, equal;
- __ ble(&less_equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
- __ Ret();
- __ bind(&less_equal);
- __ beq(&equal);
- __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
- __ bind(&equal);
- __ Ret();
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ li(r4, Operand(GREATER));
+ __ li(r5, Operand(LESS));
+ __ isel(eq, r3, r0, r4);
+ __ isel(lt, r3, r5, r3);
+ __ Ret();
+ } else {
+ Label less_equal, equal;
+ __ ble(&less_equal);
+ __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
+ __ Ret();
+ __ bind(&less_equal);
+ __ beq(&equal);
+ __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
+ __ bind(&equal);
+ __ Ret();
+ }
}
@@ -3513,17 +3687,26 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bunordered(&unordered);
// Return a result of -1, 0, or 1, based on status bits.
- __ beq(&equal);
- __ blt(&less_than);
- // assume greater than
- __ li(r3, Operand(GREATER));
- __ Ret();
- __ bind(&equal);
- __ li(r3, Operand(EQUAL));
- __ Ret();
- __ bind(&less_than);
- __ li(r3, Operand(LESS));
- __ Ret();
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ DCHECK(EQUAL == 0);
+ __ li(r4, Operand(GREATER));
+ __ li(r5, Operand(LESS));
+ __ isel(eq, r3, r0, r4);
+ __ isel(lt, r3, r5, r3);
+ __ Ret();
+ } else {
+ __ beq(&equal);
+ __ blt(&less_than);
+ // assume greater than
+ __ li(r3, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ li(r3, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ li(r3, Operand(LESS));
+ __ Ret();
+ }
__ bind(&unordered);
__ bind(&generic_stub);
@@ -3737,13 +3920,15 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r5, r4, r3);
__ JumpIfSmi(r5, &miss);
+ __ GetWeakValue(r7, cell);
__ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ Cmpi(r5, Operand(known_map_), r0);
+ __ cmp(r5, r7);
__ bne(&miss);
- __ Cmpi(r6, Operand(known_map_), r0);
+ __ cmp(r6, r7);
__ bne(&miss);
__ sub(r3, r3, r4);
@@ -3827,7 +4012,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ and_(index, index, ip);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ ShiftLeftImm(ip, index, Operand(1));
__ add(index, index, ip); // index *= 3.
@@ -4016,7 +4201,6 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ ShiftLeftImm(scratch, index, Operand(1));
__ add(index, index, scratch); // index *= 3.
- DCHECK_EQ(kSmiTagSize, 1);
__ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
__ add(index, dictionary, scratch);
__ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
@@ -4337,6 +4521,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r5);
+ CallICStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, r5);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -4713,12 +4911,167 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Restores context. stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ // Additional parameter is the address of the actual callback.
+ DCHECK(function_address.is(r4) || function_address.is(r5));
+ Register scratch = r6;
+
+ __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
+ __ lbz(scratch, MemOperand(scratch, 0));
+ __ cmpi(scratch, Operand::Zero());
+
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ mov(scratch, Operand(thunk_ref));
+ __ isel(eq, scratch, function_address, scratch);
+ } else {
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ beq(&profiler_disabled);
+ __ mov(scratch, Operand(thunk_ref));
+ __ b(&end_profiler_check);
+ __ bind(&profiler_disabled);
+ __ mr(scratch, function_address);
+ __ bind(&end_profiler_check);
+ }
+
+ // Allocate HandleScope in callee-save registers.
+ // r17 - next_address
+ // r14 - next_address->kNextOffset
+ // r15 - next_address->kLimitOffset
+ // r16 - next_address->kLevelOffset
+ __ mov(r17, Operand(next_address));
+ __ LoadP(r14, MemOperand(r17, kNextOffset));
+ __ LoadP(r15, MemOperand(r17, kLimitOffset));
+ __ lwz(r16, MemOperand(r17, kLevelOffset));
+ __ addi(r16, r16, Operand(1));
+ __ stw(r16, MemOperand(r17, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r3);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate);
+ stub.GenerateCall(masm, scratch);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, r3);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // load value from ReturnValue
+ __ LoadP(r3, return_value_operand);
+ __ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ StoreP(r14, MemOperand(r17, kNextOffset));
+ if (__ emit_debug_code()) {
+ __ lwz(r4, MemOperand(r17, kLevelOffset));
+ __ cmp(r4, r16);
+ __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ __ subi(r16, r16, Operand(1));
+ __ stw(r16, MemOperand(r17, kLevelOffset));
+ __ LoadP(r0, MemOperand(r17, kLimitOffset));
+ __ cmp(r15, r0);
+ __ bne(&delete_allocated_handles);
+
+ // Check if the function scheduled an exception.
+ __ bind(&leave_exit_frame);
+ __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ LoadP(r15, MemOperand(r15));
+ __ cmp(r14, r15);
+ __ bne(&promote_scheduled_exception);
+ __ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ LoadP(cp, *context_restore_operand);
+ }
+ // LeaveExitFrame expects unwind space to be in a register.
+ if (stack_space_operand != NULL) {
+ __ lwz(r14, *stack_space_operand);
+ } else {
+ __ mov(r14, Operand(stack_space));
+ }
+ __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
+ __ blr();
+
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ StoreP(r15, MemOperand(r17, kLimitOffset));
+ __ mr(r14, r3);
+ __ PrepareCallCFunction(1, r15);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+ 1);
+ __ mr(r3, r14);
+ __ b(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
// -- r3 : callee
// -- r7 : call_data
// -- r5 : holder
// -- r4 : api_function_address
+ // -- r6 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@@ -4733,10 +5086,6 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register api_function_address = r4;
Register context = cp;
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
-
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
@@ -4748,6 +5097,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || r3.is(argc.reg()));
+
// context save
__ push(context);
// load context from callee
@@ -4768,7 +5119,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// return value default
__ push(scratch);
// isolate
- __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch);
// holder
__ push(holder);
@@ -4784,6 +5135,8 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// [0] space for DirectCEntryStub's LR save
// [1-4] FunctionCallbackInfo
const int kApiStackSpace = 5;
+ const int kFunctionCallbackInfoOffset =
+ (kStackFrameExtraParamSlot + 1) * kPointerSize;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
@@ -4791,38 +5144,73 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
// r3 = FunctionCallbackInfo&
// Arguments is after the return address.
- __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
// FunctionCallbackInfo::implicit_args_
__ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(ip, Operand(argc));
- __ stw(ip, MemOperand(r3, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ li(ip, Operand::Zero());
- __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
-
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ if (argc.is_immediate()) {
+ // FunctionCallbackInfo::values_
+ __ addi(ip, scratch,
+ Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
+ __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(ip, Operand(argc.immediate()));
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_ = 0
+ __ li(ip, Operand::Zero());
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
+ } else {
+ __ ShiftLeftImm(ip, argc.reg(), Operand(kPointerSizeLog2));
+ __ addi(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(r0, scratch, ip);
+ __ StoreP(r0, MemOperand(r3, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ stw(argc.reg(), MemOperand(r3, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call_
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
+ }
+
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ int stack_space = 0;
+ MemOperand is_construct_call_operand =
+ MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
+ MemOperand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = NULL;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+ stack_space_operand, return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
+ call_data_undefined);
+}
+
- __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
- kStackUnwindSpace, return_value_operand,
- &context_restore_operand);
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -4880,9 +5268,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, NULL,
+ MemOperand(fp, 6 * kPointerSize), NULL);
}
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index a9d06fb62e..72f71b7b18 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -94,7 +94,7 @@ class RecordWriteStub : public PlatformCodeStub {
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
- virtual bool SometimesSetsUpAFrame() { return false; }
+ bool SometimesSetsUpAFrame() OVERRIDE { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
// Consider adding DCHECK here to catch bad patching
@@ -224,7 +224,7 @@ class RecordWriteStub : public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
- void Activate(Code* code) {
+ void Activate(Code* code) OVERRIDE {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
@@ -273,7 +273,7 @@ class DirectCEntryStub : public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
- bool NeedsImmovableCode() { return true; }
+ bool NeedsImmovableCode() OVERRIDE { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@@ -298,7 +298,7 @@ class NameDictionaryLookupStub : public PlatformCodeStub {
Label* done, Register elements,
Register name, Register r0, Register r1);
- virtual bool SometimesSetsUpAFrame() { return false; }
+ bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
static const int kInlinedProbes = 4;
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 1074e872bf..93d32c2bc6 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -46,9 +46,7 @@ UnaryMathFunction CreateExpFunction() {
Register temp3 = r9;
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
@@ -88,9 +86,7 @@ UnaryMathFunction CreateSqrtFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
__ MovFromFloatParameter(d1);
__ fsqrt(d1, d1);
@@ -159,7 +155,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// lr contains the return address
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
+ Label loop, entry, convert_hole, only_change_map, done;
Register elements = r7;
Register length = r8;
Register array = r9;
@@ -167,7 +163,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// target_map parameter can be clobbered.
Register scratch1 = target_map;
- Register scratch2 = r11;
+ Register scratch2 = r10;
+ Register scratch3 = r11;
+ Register scratch4 = r14;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
@@ -183,17 +181,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ beq(&only_change_map);
- // Preserve lr and use r17 as a temporary register.
- __ mflr(r0);
- __ Push(r0);
-
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
- __ SmiToDoubleArrayOffset(r17, length);
- __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+ __ SmiToDoubleArrayOffset(scratch3, length);
+ __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
+ __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
+ // array: destination FixedDoubleArray, not tagged as heap object.
+ // elements: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
@@ -203,27 +199,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ addi(scratch1, array, Operand(kHeapObjectTag));
__ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
__ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
- __ addi(target_map, elements,
+ __ addi(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToDoubleArrayOffset(array, length);
- __ add(array_end, r10, array);
+ __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiToDoubleArrayOffset(array_end, length);
+ __ add(array_end, scratch2, array_end);
// Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
Register hole_int64 = elements;
+ __ mov(hole_int64, Operand(kHoleNanInt64));
#else
Register hole_lower = elements;
Register hole_upper = length;
+ __ mov(hole_lower, Operand(kHoleNanLower32));
+ __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32 OR hol_int64
@@ -240,48 +239,38 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
__ b(&done);
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(r0);
- __ mtlr(r0);
- __ b(fail);
-
// Convert and copy elements.
__ bind(&loop);
- __ LoadP(r11, MemOperand(scratch1));
+ __ LoadP(scratch3, MemOperand(scratch1));
__ addi(scratch1, scratch1, Operand(kPointerSize));
- // r11: current element
- __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);
+ // scratch3: current element
+ __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);
// Normal smi, convert to double and store.
- __ ConvertIntToDouble(r11, d0);
+ __ ConvertIntToDouble(scratch3, d0);
__ stfd(d0, MemOperand(scratch2, 0));
- __ addi(r10, r10, Operand(8));
-
+ __ addi(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ LoadP(r11, MemOperand(r6, -kPointerSize));
- __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
+ __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
+ __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
#if V8_TARGET_ARCH_PPC64
- __ std(hole_int64, MemOperand(r10, 0));
+ __ std(hole_int64, MemOperand(scratch2, 0));
#else
- __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
- __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
+ __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
+ __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
- __ addi(r10, r10, Operand(8));
+ __ addi(scratch2, scratch2, Operand(8));
__ bind(&entry);
- __ cmp(r10, array_end);
+ __ cmp(scratch2, array_end);
__ blt(&loop);
- __ Pop(r0);
- __ mtlr(r0);
__ bind(&done);
}
@@ -290,11 +279,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// Register lr contains the return address.
- Label entry, loop, convert_hole, gc_required, only_change_map;
+ Label loop, convert_hole, gc_required, only_change_map;
Register elements = r7;
Register array = r9;
Register length = r8;
- Register scratch = r11;
+ Register scratch = r10;
+ Register scratch3 = r11;
+ Register hole_value = r14;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
@@ -340,7 +331,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ addi(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(length, length);
- __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
Label initialization_loop, loop_done;
__ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
@@ -353,7 +344,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ addi(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ bind(&initialization_loop);
- __ StorePU(r10, MemOperand(dst_elements, kPointerSize));
+ __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
__ bdnz(&initialization_loop);
__ addi(dst_elements, array,
@@ -367,7 +358,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
- // r10: the-hole pointer
+ // hole_value: the-hole pointer
// heap_number_map: heap number map
__ b(&loop);
@@ -378,7 +369,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ StoreP(r10, MemOperand(dst_elements));
+ __ StoreP(hole_value, MemOperand(dst_elements));
__ addi(dst_elements, dst_elements, Operand(kPointerSize));
__ cmpl(dst_elements, dst_end);
__ bge(&loop_done);
@@ -395,7 +386,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
- __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
+ __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
&gc_required);
// heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
@@ -416,14 +407,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ addi(dst_elements, dst_elements, Operand(kPointerSize));
__ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ b(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ StoreP(r10, MemOperand(dst_elements));
- __ addi(dst_elements, dst_elements, Operand(kPointerSize));
-
- __ bind(&entry);
__ cmpl(dst_elements, dst_end);
__ blt(&loop);
__ bind(&loop_done);
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 9434b8f92f..58d4430019 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -163,6 +163,7 @@ enum OpcodeExt2 {
SUBFCX = 8 << 1,
ADDCX = 10 << 1,
MULHWUX = 11 << 1,
+ ISEL = 15 << 1,
MFCR = 19 << 1,
LWARX = 20 << 1,
LDX = 21 << 1,
@@ -192,17 +193,17 @@ enum OpcodeExt2 {
STWX = 151 << 1, // store word w/ x-form
MTVSRD = 179 << 1, // Move To VSR Doubleword
STDUX = 181 << 1,
- STWUX = 183 << 1, // store word w/ update x-form
- /*
- MTCRF
- MTMSR
- STWCXx
- SUBFZEX
- */
- ADDZEX = 202 << 1, // Add to Zero Extended
- /*
- MTSR
- */
+ STWUX = 183 << 1, // store word w/ update x-form
+ /*
+ MTCRF
+ MTMSR
+ STWCXx
+ SUBFZEX
+ */
+ ADDZEX = 202 << 1, // Add to Zero Extended
+ /*
+ MTSR
+ */
MTVSRWA = 211 << 1, // Move To VSR Word Algebraic
STBX = 215 << 1, // store byte w/ x-form
MULLD = 233 << 1, // Multiply Low Double Word
@@ -212,13 +213,17 @@ enum OpcodeExt2 {
ADDX = 266 << 1, // Add
LHZX = 279 << 1, // load half-word zero w/ x-form
LHZUX = 311 << 1, // load half-word zero w/ update x-form
+ LWAX = 341 << 1, // load word algebraic w/ x-form
LHAX = 343 << 1, // load half-word algebraic w/ x-form
LHAUX = 375 << 1, // load half-word algebraic w/ update x-form
XORX = 316 << 1, // Exclusive OR
MFSPR = 339 << 1, // Move from Special-Purpose-Register
STHX = 407 << 1, // store half-word w/ x-form
+ ORC = 412 << 1, // Or with Complement
STHUX = 439 << 1, // store half-word w/ update x-form
ORX = 444 << 1, // Or
+ DIVDU = 457 << 1, // Divide Double Word Unsigned
+ DIVWU = 459 << 1, // Divide Word Unsigned
MTSPR = 467 << 1, // Move to Special-Purpose-Register
DIVD = 489 << 1, // Divide Double Word
DIVW = 491 << 1, // Divide Word
@@ -267,6 +272,9 @@ enum OpcodeExt4 {
FMR = 72 << 1, // Floating Move Register
MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
FABS = 264 << 1, // Floating Absolute Value
+ FRIN = 392 << 1, // Floating Round to Integer Nearest
+ FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
+ FRIP = 456 << 1, // Floating Round to Integer Plus
FRIM = 488 << 1, // Floating Round to Integer Minus
MFFS = 583 << 1, // move from FPSCR x-form
MTFSF = 711 << 1, // move to FPSCR fields XFL-form
@@ -334,26 +342,6 @@ enum {
kTOMask = 0x1f << 21
};
-// the following is to differentiate different faked opcodes for
-// the BOGUS PPC instruction we invented (when bit 25 is 0) or to mark
-// different stub code (when bit 25 is 1)
-// - use primary opcode 1 for undefined instruction
-// - use bit 25 to indicate whether the opcode is for fake-arm
-// instr or stub-marker
-// - use the least significant 6-bit to indicate FAKE_OPCODE_T or
-// MARKER_T
-#define FAKE_OPCODE 1 << 26
-#define MARKER_SUBOPCODE_BIT 25
-#define MARKER_SUBOPCODE 1 << MARKER_SUBOPCODE_BIT
-#define FAKER_SUBOPCODE 0 << MARKER_SUBOPCODE_BIT
-
-enum FAKE_OPCODE_T {
- fBKPT = 14,
- fLastFaker // can't be more than 128 (2^^7)
-};
-#define FAKE_OPCODE_HIGH_BIT 7 // fake opcode has to fall into bit 0~7
-#define F_NEXT_AVAILABLE_STUB_MARKER 369 // must be less than 2^^9 (512)
-#define STUB_MARKER_HIGH_BIT 9 // stub marker has to fall into bit 0~9
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
@@ -411,9 +399,7 @@ enum SoftwareInterruptCodes {
// break point
kBreakpoint = 0x821008, // bits23-0 of 0x7d821008 = twge r2, r2
// stop
- kStopCode = 1 << 23,
- // info
- kInfo = 0x9ff808 // bits23-0 of 0x7d9ff808 = twge r31, r31
+ kStopCode = 1 << 23
};
const uint32_t kStopCodeMask = kStopCode - 1;
const uint32_t kMaxStopCode = kStopCode - 1;
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 58e9e939f5..ac1504c020 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -25,6 +25,12 @@ int Deoptimizer::patch_size() {
}
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+ // Empty because there is no need for relocation information for the code
+ // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
@@ -86,9 +92,12 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
+ // We ensure the values are Smis to avoid confusing the garbage
+ // collector in the event that any values are retrieved and stored
+ // elsewhere.
for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
+ input_->SetRegister(i, reinterpret_cast<intptr_t>(Smi::FromInt(i)));
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 63cec8cd85..3472828eee 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -77,7 +77,6 @@ class Decoder {
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
void UnknownFormat(Instruction* instr, const char* opcname);
- void MarkerFormat(Instruction* instr, const char* opcname, int id);
void DecodeExt1(Instruction* instr);
void DecodeExt2(Instruction* instr);
@@ -360,13 +359,6 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
}
-void Decoder::MarkerFormat(Instruction* instr, const char* name, int id) {
- char buffer[100];
- snprintf(buffer, sizeof(buffer), "%s %d", name, id);
- Format(instr, buffer);
-}
-
-
void Decoder::DecodeExt1(Instruction* instr) {
switch (instr->Bits(10, 1) << 1) {
case MCRF: {
@@ -605,43 +597,43 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "cmpw 'ra, 'rb");
}
#endif
- break;
+ return;
}
case SLWX: {
Format(instr, "slw'. 'ra, 'rs, 'rb");
- break;
+ return;
}
#if V8_TARGET_ARCH_PPC64
case SLDX: {
Format(instr, "sld'. 'ra, 'rs, 'rb");
- break;
+ return;
}
#endif
case SUBFCX: {
Format(instr, "subfc'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case ADDCX: {
Format(instr, "addc'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case CNTLZWX: {
Format(instr, "cntlzw'. 'ra, 'rs");
- break;
+ return;
}
#if V8_TARGET_ARCH_PPC64
case CNTLZDX: {
Format(instr, "cntlzd'. 'ra, 'rs");
- break;
+ return;
}
#endif
case ANDX: {
Format(instr, "and'. 'ra, 'rs, 'rb");
- break;
+ return;
}
case ANDCX: {
Format(instr, "andc'. 'ra, 'rs, 'rb");
- break;
+ return;
}
case CMPL: {
#if V8_TARGET_ARCH_PPC64
@@ -653,55 +645,59 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "cmplw 'ra, 'rb");
}
#endif
- break;
+ return;
}
case NEGX: {
Format(instr, "neg'. 'rt, 'ra");
- break;
+ return;
}
case NORX: {
Format(instr, "nor'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case SUBFX: {
Format(instr, "subf'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case MULHWX: {
Format(instr, "mulhw'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
case ADDZEX: {
Format(instr, "addze'. 'rt, 'ra");
- break;
+ return;
}
case MULLW: {
Format(instr, "mullw'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
#if V8_TARGET_ARCH_PPC64
case MULLD: {
Format(instr, "mulld'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
#endif
case DIVW: {
Format(instr, "divw'o'. 'rt, 'ra, 'rb");
- break;
+ return;
+ }
+ case DIVWU: {
+ Format(instr, "divwu'o'. 'rt, 'ra, 'rb");
+ return;
}
#if V8_TARGET_ARCH_PPC64
case DIVD: {
Format(instr, "divd'o'. 'rt, 'ra, 'rb");
- break;
+ return;
}
#endif
case ADDX: {
Format(instr, "add'o 'rt, 'ra, 'rb");
- break;
+ return;
}
case XORX: {
Format(instr, "xor'. 'ra, 'rs, 'rb");
- break;
+ return;
}
case ORX: {
if (instr->RTValue() == instr->RBValue()) {
@@ -709,7 +705,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
} else {
Format(instr, "or 'ra, 'rs, 'rb");
}
- break;
+ return;
}
case MFSPR: {
int spr = instr->Bits(20, 11);
@@ -718,7 +714,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
} else {
Format(instr, "mfspr 'rt ??");
}
- break;
+ return;
}
case MTSPR: {
int spr = instr->Bits(20, 11);
@@ -729,98 +725,113 @@ void Decoder::DecodeExt2(Instruction* instr) {
} else {
Format(instr, "mtspr 'rt ??");
}
- break;
+ return;
}
case MFCR: {
Format(instr, "mfcr 'rt");
- break;
+ return;
}
case STWX: {
Format(instr, "stwx 'rs, 'ra, 'rb");
- break;
+ return;
}
case STWUX: {
Format(instr, "stwux 'rs, 'ra, 'rb");
- break;
+ return;
}
case STBX: {
Format(instr, "stbx 'rs, 'ra, 'rb");
- break;
+ return;
}
case STBUX: {
Format(instr, "stbux 'rs, 'ra, 'rb");
- break;
+ return;
}
case STHX: {
Format(instr, "sthx 'rs, 'ra, 'rb");
- break;
+ return;
}
case STHUX: {
Format(instr, "sthux 'rs, 'ra, 'rb");
- break;
+ return;
}
case LWZX: {
Format(instr, "lwzx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LWZUX: {
Format(instr, "lwzux 'rt, 'ra, 'rb");
- break;
+ return;
+ }
+ case LWAX: {
+ Format(instr, "lwax 'rt, 'ra, 'rb");
+ return;
}
case LBZX: {
Format(instr, "lbzx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LBZUX: {
Format(instr, "lbzux 'rt, 'ra, 'rb");
- break;
+ return;
}
case LHZX: {
Format(instr, "lhzx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LHZUX: {
Format(instr, "lhzux 'rt, 'ra, 'rb");
- break;
+ return;
+ }
+ case LHAX: {
+ Format(instr, "lhax 'rt, 'ra, 'rb");
+ return;
}
#if V8_TARGET_ARCH_PPC64
case LDX: {
Format(instr, "ldx 'rt, 'ra, 'rb");
- break;
+ return;
}
case LDUX: {
Format(instr, "ldux 'rt, 'ra, 'rb");
- break;
+ return;
}
case STDX: {
Format(instr, "stdx 'rt, 'ra, 'rb");
- break;
+ return;
}
case STDUX: {
Format(instr, "stdux 'rt, 'ra, 'rb");
- break;
+ return;
}
case MFVSRD: {
Format(instr, "mffprd 'ra, 'Dt");
- break;
+ return;
}
case MFVSRWZ: {
Format(instr, "mffprwz 'ra, 'Dt");
- break;
+ return;
}
case MTVSRD: {
Format(instr, "mtfprd 'Dt, 'ra");
- break;
+ return;
}
case MTVSRWA: {
Format(instr, "mtfprwa 'Dt, 'ra");
- break;
+ return;
}
case MTVSRWZ: {
Format(instr, "mtfprwz 'Dt, 'ra");
- break;
+ return;
}
#endif
+ }
+
+ switch (instr->Bits(5, 1) << 1) {
+ case ISEL: {
+ Format(instr, "isel 'rt, 'ra, 'rb");
+ return;
+ }
default: {
Unknown(instr); // not used by V8
}
@@ -913,8 +924,20 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fabs'. 'Dt, 'Db");
break;
}
+ case FRIN: {
+ Format(instr, "frin. 'Dt, 'Db");
+ break;
+ }
+ case FRIZ: {
+ Format(instr, "friz. 'Dt, 'Db");
+ break;
+ }
+ case FRIP: {
+ Format(instr, "frip. 'Dt, 'Db");
+ break;
+ }
case FRIM: {
- Format(instr, "frim 'Dt, 'Db");
+ Format(instr, "frim. 'Dt, 'Db");
break;
}
case FNEG: {
@@ -1252,18 +1275,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
break;
}
#endif
-
- case FAKE_OPCODE: {
- if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
- int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
- DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
- MarkerFormat(instr, "stub-marker ", marker_code);
- } else {
- int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
- MarkerFormat(instr, "faker-opcode ", fake_opcode);
- }
- break;
- }
default: {
Unknown(instr);
break;
diff --git a/deps/v8/src/ppc/full-codegen-ppc.cc b/deps/v8/src/ppc/full-codegen-ppc.cc
index 1bb4f54f4a..213756e875 100644
--- a/deps/v8/src/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/ppc/full-codegen-ppc.cc
@@ -123,7 +123,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
@@ -158,7 +158,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -267,7 +267,7 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -890,15 +890,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(r4, scope_->ContextChainLength(scope_->ScriptScope()));
- __ LoadP(r4, ContextOperand(r4, variable->interface()->Index()));
+ __ LoadP(r4, ContextOperand(r4, descriptor->Index()));
__ LoadP(r4, ContextOperand(r4, Context::EXTENSION_INDEX));
// Assign it.
@@ -1066,6 +1067,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
+ SetExpressionPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r3, ip);
@@ -1171,6 +1173,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
+ SetExpressionPosition(stmt->each());
+
// Load the current count to r3, load the length to r4.
__ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
__ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
@@ -1217,6 +1221,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
{
EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1243,46 +1248,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- Comment cmnt(masm_, "[ ForOfStatement");
- SetStatementPosition(stmt);
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // var iterator = iterable[Symbol.iterator]();
- VisitForEffect(stmt->assign_iterator());
-
- // Loop entry.
- __ bind(loop_statement.continue_label());
-
- // result = iterator.next()
- VisitForEffect(stmt->next_result());
-
- // if (result.done) break;
- Label result_not_done;
- VisitForControl(stmt->result_done(), loop_statement.break_label(),
- &result_not_done, &result_not_done);
- __ bind(&result_not_done);
-
- // each = result.value
- VisitForEffect(stmt->assign_each());
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
- __ b(loop_statement.continue_label());
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(loop_statement.break_label());
- decrement_loop_depth();
-}
-
-
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1293,7 +1258,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
scope()->is_function_scope() && info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ mov(r5, Operand(info));
__ CallStub(&stub);
} else {
@@ -1338,6 +1303,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
}
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+ int offset) {
+ if (NeedsHomeObject(initializer)) {
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ CallStoreIC();
+ }
+}
+
+
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@@ -1516,6 +1494,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1663,11 +1646,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ push(r3); // Save result on stack
@@ -1690,6 +1675,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+ if (NeedsHomeObject(value)) {
+ __ Move(StoreDescriptor::ReceiverRegister(), r3);
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(), MemOperand(sp));
+ CallStoreIC();
+ }
} else {
VisitForEffect(value);
}
@@ -1701,6 +1694,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
+ EmitSetHomeObjectIfNeeded(value, 2);
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
__ push(r3);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1713,17 +1707,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadP(r3, MemOperand(sp));
__ push(r3);
VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- __ Drop(2);
- }
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1736,12 +1731,77 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r3);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
+ EmitSetHomeObjectIfNeeded(it->second->getter, 2);
EmitAccessor(it->second->setter);
+ EmitSetHomeObjectIfNeeded(it->second->setter, 3);
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r3); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
+ __ push(r3);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ mov(r3, Operand(Smi::FromInt(NONE)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ mov(r3, Operand(Smi::FromInt(NONE)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ LoadP(r3, MemOperand(sp));
@@ -1793,6 +1853,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1931,16 +1992,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(r3); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(), op, mode, expr->target(),
+ EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -2156,15 +2214,6 @@ void FullCodeGenerator::EmitGeneratorResume(
VisitForAccumulatorValue(value);
__ pop(r4);
- // Check generator state.
- Label wrong_state, closed_state, done;
- __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
- __ CmpSmiLiteral(r6, Smi::FromInt(0), r0);
- __ beq(&closed_state);
- __ blt(&wrong_state);
-
// Load suspended function and context.
__ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
@@ -2193,7 +2242,7 @@ void FullCodeGenerator::EmitGeneratorResume(
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
- Label resume_frame;
+ Label resume_frame, done;
__ bind(&push_frame);
__ b(&resume_frame, SetLK);
__ b(&done);
@@ -2257,26 +2306,6 @@ void FullCodeGenerator::EmitGeneratorResume(
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
- // Reach here when generator is closed.
- __ bind(&closed_state);
- if (resume_mode == JSGeneratorObject::NEXT) {
- // Return completed iterator result when generator is closed.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ push(r5);
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(true);
- } else {
- // Throw the provided value.
- __ push(r3);
- __ CallRuntime(Runtime::kThrow, 1);
- }
- __ b(&done);
-
- // Throw error if we attempt to operate on a running generator.
- __ bind(&wrong_state);
- __ push(r4);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
__ bind(&done);
context()->Plug(result_register());
}
@@ -2374,7 +2403,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left_expr,
Expression* right_expr) {
Label done, smi_case, stub_call;
@@ -2394,7 +2422,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ b(&done);
@@ -2432,13 +2460,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
case Token::ADD: {
__ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ bne(&stub_call, cr0);
+ __ BranchOnOverflow(&stub_call);
__ mr(right, scratch1);
break;
}
case Token::SUB: {
__ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ bne(&stub_call, cr0);
+ __ BranchOnOverflow(&stub_call);
__ mr(right, scratch1);
break;
}
@@ -2451,7 +2479,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Mul(scratch1, r0, ip);
// Check for overflowing the smi range - no overflow if higher 33 bits of
// the result are identical.
- __ TestIfInt32(scratch1, scratch2, ip);
+ __ TestIfInt32(scratch1, r0);
__ bne(&stub_call);
#else
__ SmiUntag(ip, right);
@@ -2512,9 +2540,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
@@ -2522,23 +2548,29 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ LoadP(scratch, MemOperand(sp, 0)); // prototype
}
__ push(scratch);
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
default:
@@ -2554,10 +2586,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(r4);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2632,7 +2663,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2701,8 +2732,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
// Assignment to var.
__ push(r3); // Value.
__ mov(r4, Operand(var->name()));
- __ mov(r3, Operand(Smi::FromInt(strict_mode())));
- __ Push(cp, r4, r3); // Context, name, strict mode.
+ __ mov(r3, Operand(Smi::FromInt(language_mode())));
+ __ Push(cp, r4, r3); // Context, name, language mode.
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
@@ -2717,8 +2748,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
- // Non-initializing assignments to consts are ignored.
}
@@ -2750,8 +2782,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(r3);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2763,9 +2795,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(r3);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2777,7 +2810,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r3));
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2800,8 +2834,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(r3);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2816,8 +2848,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(r3);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(r3);
}
@@ -2970,8 +3003,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(isolate(), arg_count, call_type);
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2999,8 +3032,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ LoadP(r6, MemOperand(fp, receiver_offset * kPointerSize), r0);
- // r5: strict mode.
- __ LoadSmiLiteral(r5, Smi::FromInt(strict_mode()));
+ // r5: language mode.
+ __ LoadSmiLiteral(r5, Smi::FromInt(language_mode()));
// r4: the start position of the scope the calls resides in.
__ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
@@ -3137,11 +3170,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ Push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3171,12 +3200,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ Push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3210,6 +3235,61 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ EmitLoadSuperConstructor(super_ref);
+ __ push(result_register());
+
+ Variable* this_var = super_ref->this_var()->var();
+
+ GetVar(r3, this_var);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ beq(&uninitialized_this);
+ __ mov(r3, Operand(this_var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into r1 and r0.
+ __ mov(r3, Operand(arg_count));
+ __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ Move(r5, FeedbackVector());
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
+
+ // TODO(dslomov): use a different stub and propagate new.target.
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ RecordJSReturnSite(expr);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(r3);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3753,7 +3833,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -4089,7 +4169,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4509,7 +4589,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ LoadSmiLiteral(r4, Smi::FromInt(strict_mode()));
+ __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
__ push(r4);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r3);
@@ -4517,7 +4597,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ LoadP(r5, GlobalObjectOperand());
__ mov(r4, Operand(var->name()));
@@ -4730,6 +4810,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4764,8 +4845,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4834,7 +4914,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 693f341e99..f82d85ded0 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -98,7 +98,19 @@ void FastCloneShallowObjectDescriptor::Initialize(
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r5, r6};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r5, r6, r4};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -125,6 +137,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4, r6, r5};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// r3 : number of arguments
// r4 : the function to call
@@ -152,6 +174,15 @@ void TransitionElementsKindDescriptor::Initialize(
}
+void AllocateHeapNumberDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ Register registers[] = {cp};
+ data->Initialize(arraysize(registers), registers, nullptr);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state
@@ -290,6 +321,27 @@ void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
r7, // call_data
r5, // holder
r4, // api_function_address
+ r6, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ r3, // callee
+ r7, // call_data
+ r5, // holder
+ r4, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
index 7b6052c3e6..4d17189c84 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc
@@ -58,7 +58,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
@@ -118,7 +117,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -328,7 +327,7 @@ bool LCodeGen::GenerateJumpTable() {
DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
@@ -772,7 +771,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
CRegister cr) {
LEnvironment* environment = instr->environment();
@@ -813,20 +812,21 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
__ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -836,10 +836,11 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, CRegister cr) {
+ Deoptimizer::DeoptReason deopt_reason,
+ CRegister cr) {
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, detail, bailout_type, cr);
+ DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
}
@@ -861,6 +862,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1065,12 +1067,12 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ ExtractBitRange(dividend, dividend, shift - 1, 0);
__ neg(dividend, dividend, LeaveOE, SetRC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", cr0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
}
} else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ li(dividend, Operand::Zero());
} else {
- DeoptimizeIf(al, instr, "minus zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
}
__ b(&done);
}
@@ -1092,7 +1094,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1107,7 +1109,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ bne(&remainder_not_zero, cr0);
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1119,9 +1121,10 @@ void LCodeGen::DoModI(LModI* instr) {
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
Register scratch = scratch0();
+ bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
Label done;
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
}
@@ -1131,21 +1134,26 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for kMinInt % -1, divw will return undefined, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
+ if (can_overflow) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(overflow, instr, "minus zero", cr0);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
} else {
- __ bnooverflow(&no_overflow_possible, cr0);
- __ li(result_reg, Operand::Zero());
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(overflow, result_reg, r0, result_reg, cr0);
+ __ boverflow(&done, cr0);
+ } else {
+ Label no_overflow_possible;
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ li(result_reg, Operand::Zero());
+ __ b(&done);
+ __ bind(&no_overflow_possible);
+ }
}
- __ bind(&no_overflow_possible);
}
__ mullw(scratch, right_reg, scratch);
@@ -1155,7 +1163,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bne(&done, cr0);
__ cmpwi(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
@@ -1173,13 +1181,13 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1187,7 +1195,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
__ TestBitRange(dividend, shift - 1, 0, r0);
- DeoptimizeIf(ne, instr, "lost precision", cr0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1217,7 +1225,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1225,7 +1233,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1236,7 +1244,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ mullw(scratch, result, ip);
__ cmpw(scratch, dividend);
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1247,11 +1255,12 @@ void LCodeGen::DoDivI(LDivI* instr) {
const Register dividend = ToRegister(instr->dividend());
const Register divisor = ToRegister(instr->divisor());
Register result = ToRegister(instr->result());
+ bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
DCHECK(!dividend.is(result));
DCHECK(!divisor.is(result));
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
}
@@ -1261,7 +1270,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1270,21 +1279,25 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
+ if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, "overflow", cr0);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
- __ bnooverflow(&no_overflow_possible, cr0);
- __ mr(result, dividend);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(overflow, result, dividend, result, cr0);
+ } else {
+ Label no_overflow_possible;
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ mr(result, dividend);
+ __ bind(&no_overflow_possible);
+ }
}
- __ bind(&no_overflow_possible);
}
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
@@ -1292,7 +1305,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register scratch = scratch0();
__ mullw(scratch, divisor, result);
__ cmpw(dividend, scratch);
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1302,6 +1315,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
Register result = ToRegister(instr->result());
int32_t divisor = instr->divisor();
+ bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
// If the divisor is positive, things are easy: There can be no deopts and we
// can simply do an arithmetic right shift.
@@ -1316,13 +1330,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
OEBit oe = LeaveOE;
#if V8_TARGET_ARCH_PPC64
- if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ if (divisor == -1 && can_overflow) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
#else
- if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
oe = SetOE;
@@ -1331,12 +1345,12 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ neg(result, dividend, oe, SetRC);
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero", cr0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
}
// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_PPC64
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ if (!can_overflow) {
#endif
if (shift) {
__ ShiftRightArithImm(result, result, shift);
@@ -1347,7 +1361,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
- DeoptimizeIf(overflow, instr, "overflow", cr0);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
return;
}
@@ -1369,7 +1383,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1377,7 +1391,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1414,11 +1428,12 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
const Register dividend = ToRegister(instr->dividend());
const Register divisor = ToRegister(instr->divisor());
Register result = ToRegister(instr->result());
+ bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
DCHECK(!dividend.is(result));
DCHECK(!divisor.is(result));
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
}
@@ -1428,7 +1443,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1437,21 +1452,25 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
+ if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, "overflow", cr0);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
- __ bnooverflow(&no_overflow_possible, cr0);
- __ mr(result, dividend);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(overflow, result, dividend, result, cr0);
+ } else {
+ Label no_overflow_possible;
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ mr(result, dividend);
+ __ bind(&no_overflow_possible);
+ }
}
- __ bind(&no_overflow_possible);
}
Label done;
@@ -1515,7 +1534,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmpi(left, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
switch (constant) {
@@ -1527,12 +1546,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
__ neg(result, left, SetOE, SetRC);
- DeoptimizeIf(overflow, instr, "overflow", cr0);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
#if V8_TARGET_ARCH_PPC64
} else {
__ neg(result, left);
- __ TestIfInt32(result, scratch, r0);
- DeoptimizeIf(ne, instr, "overflow");
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
}
#endif
} else {
@@ -1552,7 +1571,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ cmpwi(left, Operand::Zero());
}
#endif
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
__ li(result, Operand::Zero());
break;
@@ -1604,8 +1623,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ Mul(result, left, right);
}
- __ TestIfInt32(result, scratch, r0);
- DeoptimizeIf(ne, instr, "overflow");
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
@@ -1620,7 +1639,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ mullw(result, left, right);
}
__ TestIfInt32(scratch, result, r0);
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
#endif
} else {
if (instr->hydrogen()->representation().IsSmi()) {
@@ -1647,7 +1666,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
#endif
// Bail out if the result is minus zero.
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -1733,7 +1752,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
#if V8_TARGET_ARCH_PPC64
__ extsw(result, result, SetRC);
#endif
- DeoptimizeIf(lt, instr, "negative value", cr0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
} else {
__ srw(result, left, scratch);
}
@@ -1773,7 +1792,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ cmpwi(left, Operand::Zero());
- DeoptimizeIf(lt, instr, "negative value");
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
}
__ Move(result, left);
}
@@ -1792,7 +1811,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, "overflow", cr0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
} else {
__ slwi(result, left, Operand(shift_count));
@@ -1817,12 +1836,23 @@ void LCodeGen::DoSubI(LSubI* instr) {
Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (!can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+ const bool isInteger = !instr->hydrogen()->representation().IsSmi();
+#else
+ const bool isInteger = false;
+#endif
+ if (!can_overflow || isInteger) {
if (right->IsConstantOperand()) {
__ Add(result, left, -(ToOperand(right).immediate()), r0);
} else {
__ sub(result, left, EmitLoadRegister(right, ip));
}
+#if V8_TARGET_ARCH_PPC64
+ if (can_overflow) {
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ }
+#endif
} else {
if (right->IsConstantOperand()) {
__ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
@@ -1831,20 +1861,8 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
-// Doptimize on overflow
-#if V8_TARGET_ARCH_PPC64
- if (!instr->hydrogen()->representation().IsSmi()) {
- __ extsw(scratch0(), scratch0(), SetRC);
- }
-#endif
- DeoptimizeIf(lt, instr, "overflow", cr0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
}
-
-#if V8_TARGET_ARCH_PPC64
- if (!instr->hydrogen()->representation().IsSmi()) {
- __ extsw(result, result);
- }
-#endif
}
@@ -1876,11 +1894,23 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
}
-// TODO(penguin): put const to constant pool instead
-// of storing double to stack
void LCodeGen::DoConstantD(LConstantD* instr) {
DCHECK(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
+#if V8_HOST_ARCH_IA32
+ // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
+ // builds.
+ uint64_t bits = instr->bits();
+ if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
+ V8_UINT64_C(0x7FF0000000000000)) {
+ uint32_t lo = static_cast<uint32_t>(bits);
+ uint32_t hi = static_cast<uint32_t>(bits >> 32);
+ __ mov(ip, Operand(lo));
+ __ mov(scratch0(), Operand(hi));
+ __ MovInt64ToDouble(result, scratch0(), ip);
+ return;
+ }
+#endif
double v = instr->value();
__ LoadDoubleLiteral(result, v, scratch0());
}
@@ -1917,9 +1947,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ TestIfSmi(object, r0);
- DeoptimizeIf(eq, instr, "Smi", cr0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, "not a date object");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2029,16 +2059,24 @@ void LCodeGen::DoAddI(LAddI* instr) {
Register result = ToRegister(instr->result());
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
- bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
- instr->hydrogen()->representation().IsExternal());
+ const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+ instr->hydrogen()->representation().IsExternal());
+#else
+ const bool isInteger = false;
#endif
- if (!can_overflow) {
+ if (!can_overflow || isInteger) {
if (right->IsConstantOperand()) {
__ Add(result, left, ToOperand(right).immediate(), r0);
} else {
__ add(result, left, EmitLoadRegister(right, ip));
}
+#if V8_TARGET_ARCH_PPC64
+ if (can_overflow) {
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ }
+#endif
} else {
if (right->IsConstantOperand()) {
__ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
@@ -2047,20 +2085,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
-// Doptimize on overflow
-#if V8_TARGET_ARCH_PPC64
- if (isInteger) {
- __ extsw(scratch0(), scratch0(), SetRC);
- }
-#endif
- DeoptimizeIf(lt, instr, "overflow", cr0);
- }
-
-#if V8_TARGET_ARCH_PPC64
- if (isInteger) {
- __ extsw(result, result);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
}
-#endif
}
@@ -2083,12 +2109,16 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ cmpw(left_reg, right_reg);
}
#endif
- __ b(cond, &return_left);
- __ Move(result_reg, right_reg);
- __ b(&done);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(cond, result_reg, left_reg, right_reg);
+ } else {
+ __ b(cond, &return_left);
+ __ Move(result_reg, right_reg);
+ __ b(&done);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ }
} else {
DCHECK(instr->hydrogen()->representation().IsDouble());
DoubleRegister left_reg = ToDoubleRegister(left);
@@ -2176,8 +2206,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r3));
DCHECK(ToRegister(instr->result()).is(r3));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2295,7 +2324,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg, r0);
- DeoptimizeIf(eq, instr, "Smi", cr0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
}
const Register map = scratch0();
@@ -2352,7 +2381,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, "unexpected object");
+ DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -2815,15 +2844,22 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- Label equal, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&equal);
- __ mov(r3, Operand(factory()->false_value()));
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ mov(r4, Operand(factory()->true_value()));
+ __ mov(r5, Operand(factory()->false_value()));
+ __ cmpi(r3, Operand::Zero());
+ __ isel(eq, r3, r4, r5);
+ } else {
+ Label equal, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&equal);
+ __ mov(r3, Operand(factory()->false_value()));
+ __ b(&done);
- __ bind(&equal);
- __ mov(r3, Operand(factory()->true_value()));
- __ bind(&done);
+ __ bind(&equal);
+ __ mov(r3, Operand(factory()->true_value()));
+ __ bind(&done);
+ }
}
@@ -2922,10 +2958,15 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ Move(InstanceofStub::right(), instr->function());
// Include instructions below in delta: mov + call = mov + (mov + 2)
- static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2;
+ static const int kAdditionalDelta = 2 * Assembler::kMovInstructions + 2;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ if (Assembler::kMovInstructions != 1 &&
+ is_int16(delta * Instruction::kInstrSize)) {
+ // The following mov will be an li rather than a multi-instruction form
+ delta -= Assembler::kMovInstructions - 1;
+ }
// r8 is used to communicate the offset to the location of the map check.
__ mov(r8, Operand(delta * Instruction::kInstrSize));
}
@@ -2950,17 +2991,23 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ cmpi(r3, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
- Label true_value, done;
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r4, Heap::kTrueValueRootIndex);
+ __ LoadRoot(r5, Heap::kFalseValueRootIndex);
+ __ isel(condition, ToRegister(instr->result()), r4, r5);
+ } else {
+ Label true_value, done;
- __ b(condition, &true_value);
+ __ b(condition, &true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ b(&done);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ b(&done);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
+ __ bind(&done);
+ }
}
@@ -2987,6 +3034,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ addi(sp, sp, Operand(sp_delta));
}
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
if (NeedsEagerFrame()) {
@@ -3011,7 +3059,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
@@ -3019,13 +3067,18 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(r3));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(r3));
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Operand(Smi::FromInt(instr->hydrogen()->slot())));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ mov(slot_register, Operand(Smi::FromInt(index)));
}
@@ -3061,7 +3114,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
// Store the value.
@@ -3076,14 +3129,22 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadP(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole");
+ __ cmp(result, ip);
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
} else {
- Label skip;
- __ bne(&skip);
- __ mov(result, Operand(factory()->undefined_value()));
- __ bind(&skip);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ Register scratch = scratch0();
+ __ mov(scratch, Operand(factory()->undefined_value()));
+ __ cmp(result, ip);
+ __ isel(eq, result, scratch, result);
+ } else {
+ Label skip;
+ __ cmp(result, ip);
+ __ bne(&skip);
+ __ mov(result, Operand(factory()->undefined_value()));
+ __ bind(&skip);
+ }
}
}
}
@@ -3102,7 +3163,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
} else {
__ bne(&skip_assignment);
}
@@ -3135,6 +3196,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
if (instr->hydrogen()->representation().IsDouble()) {
+ DCHECK(access.IsInobject());
DoubleRegister result = ToDoubleRegister(instr->result());
__ lfd(result, FieldMemOperand(object, offset));
return;
@@ -3153,11 +3215,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (representation.IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
// Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
+ offset = SmiWordOffset(offset);
representation = Representation::Integer32();
}
#endif
@@ -3194,18 +3252,25 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ bne(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ // Get the prototype from the initial map (optimistic).
+ __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ __ isel(eq, result, ip, result);
+ } else {
+ Label done;
+ __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ __ bne(&done);
- // Get the prototype from the initial map.
- __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ // Get the prototype from the initial map.
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
- // All done.
- __ bind(&done);
+ // All done.
+ __ bind(&done);
+ }
}
@@ -3319,11 +3384,10 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
if (key_is_constant) {
- __ LoadHalfWord(result, mem_operand, r0);
+ __ LoadHalfWordArith(result, mem_operand, r0);
} else {
- __ lhzx(result, mem_operand);
+ __ lhax(result, mem_operand);
}
- __ extsh(result, result);
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
@@ -3336,13 +3400,10 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
if (key_is_constant) {
- __ LoadWord(result, mem_operand, r0);
+ __ LoadWordArith(result, mem_operand, r0);
} else {
- __ lwzx(result, mem_operand);
+ __ lwax(result, mem_operand);
}
-#if V8_TARGET_ARCH_PPC64
- __ extsw(result, result);
-#endif
break;
case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
@@ -3354,7 +3415,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmplw(result, r0);
- DeoptimizeIf(ge, instr, "negative value");
+ DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3417,7 +3478,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
}
__ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
@@ -3457,11 +3518,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
DCHECK(!requires_hole_check);
// Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
+ offset = SmiWordOffset(offset);
}
#endif
@@ -3472,11 +3529,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
__ TestIfSmi(result, r0);
- DeoptimizeIf(ne, instr, "not a Smi", cr0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
}
@@ -3546,7 +3603,6 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ subi(result, sp, Operand(2 * kPointerSize));
} else {
// Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
__ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(result,
MemOperand(scratch, StandardFrameConstants::kContextOffset));
@@ -3554,13 +3610,18 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
- __ beq(&adapted);
- __ mr(result, fp);
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(eq, result, scratch, fp);
+ } else {
+ Label done, adapted;
+ __ beq(&adapted);
+ __ mr(result, fp);
+ __ b(&done);
- __ bind(&adapted);
- __ mr(result, scratch);
- __ bind(&done);
+ __ bind(&adapted);
+ __ mr(result, scratch);
+ __ bind(&done);
+ }
}
}
@@ -3635,9 +3696,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr, "not a JavaScript object");
+ DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3670,7 +3731,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpli(length, Operand(kArgumentsLimit));
- DeoptimizeIf(gt, instr, "too many arguments");
+ DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3749,21 +3810,19 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr, R4State r4_state) {
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
+ Register function_reg = r4;
+
LPointerMap* pointers = instr->pointer_map();
if (can_invoke_directly) {
- if (r4_state == R4_UNINITIALIZED) {
- __ Move(r4, function);
- }
-
// Change context.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
// Set r3 to arguments count if adaption is not needed. Assumes that r3
// is available to write to at this point.
@@ -3777,7 +3836,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (is_self_call) {
__ CallSelf();
} else {
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
__ CallJSEntry(ip);
}
@@ -3787,7 +3846,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
@@ -3803,7 +3862,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, ip);
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@@ -3873,7 +3932,7 @@ void LCodeGen::EmitMathAbs(LMathAbs* instr) {
__ mtxer(r0);
__ neg(result, result, SetOE, SetRC);
// Deoptimize on overflow.
- DeoptimizeIf(overflow, instr, "overflow", cr0);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
__ bind(&done);
}
@@ -3890,7 +3949,7 @@ void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
// Deoptimize on overflow.
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(input, r0);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
__ neg(result, result);
__ bind(&done);
@@ -3949,7 +4008,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
&exact);
- DeoptimizeIf(al, instr, "lost precision or NaN");
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3957,7 +4016,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmpi(result, Operand::Zero());
__ bne(&done);
__ cmpwi(input_high, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -3976,7 +4035,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ LoadDoubleLiteral(dot_five, 0.5, r0);
__ fabs(double_scratch1, input);
__ fcmpu(double_scratch1, dot_five);
- DeoptimizeIf(unordered, instr, "lost precision or NaN");
+ DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
// If input is in [-0.5, -0], the result is -0.
// If input is in [+0, +0.5[, the result is +0.
// If the input is +0.5, the result is 1.
@@ -3989,25 +4048,31 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
#endif
__ cmpi(scratch1, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
- Label return_zero;
__ fcmpu(input, dot_five);
- __ bne(&return_zero);
- __ li(result, Operand(1)); // +0.5.
- __ b(&done);
- // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
- // flag kBailoutOnMinusZero.
- __ bind(&return_zero);
- __ li(result, Operand::Zero());
- __ b(&done);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ li(result, Operand(1));
+ __ isel(lt, result, r0, result);
+ __ b(&done);
+ } else {
+ Label return_zero;
+ __ bne(&return_zero);
+ __ li(result, Operand(1)); // +0.5.
+ __ b(&done);
+ // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+ // flag kBailoutOnMinusZero.
+ __ bind(&return_zero);
+ __ li(result, Operand::Zero());
+ __ b(&done);
+ }
__ bind(&convert);
__ fadd(input_plus_dot_five, input, dot_five);
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
double_scratch0(), &done, &done);
- DeoptimizeIf(al, instr, "lost precision or NaN");
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
__ bind(&done);
}
@@ -4054,9 +4119,7 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
-#ifdef DEBUG
Register tagged_exponent = MathPowTaggedDescriptor::exponent();
-#endif
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d2));
DCHECK(!instr->right()->IsRegister() ||
@@ -4069,11 +4132,12 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(r5, &no_deopt);
- __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!r10.is(tagged_exponent));
+ __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r10, ip);
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -4131,7 +4195,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr, R4_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
@@ -4144,45 +4208,74 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(r4));
DCHECK(name.is(r5));
+ Register scratch = r7;
+ Register extra = r8;
+ Register extra2 = r9;
+ Register extra3 = r10;
- Register scratch = r6;
- Register extra = r7;
- Register extra2 = r8;
- Register extra3 = r9;
+#ifdef DEBUG
+ Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+ Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra, extra2, extra3);
+ if (!instr->hydrogen()->is_just_miss()) {
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra, extra2, extra3);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallJSEntry(ip);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(ip);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
}
@@ -4222,8 +4315,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(r6));
+ DCHECK(vector_register.is(r5));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ Move(vector_register, vector);
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4337,7 +4452,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
IsSmi(LConstantOperand::cast(instr->value())));
#endif
- if (representation.IsDouble()) {
+ if (!FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DCHECK(!hinstr->has_transition());
DCHECK(!hinstr->NeedsWriteBarrier());
@@ -4360,45 +4475,50 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- Register value = ToRegister(instr->value());
-
+ Register record_dest = object;
+ Register record_value = no_reg;
+ Register record_scratch = scratch;
#if V8_TARGET_ARCH_PPC64
- // 64-bit Smi optimization
- if (representation.IsSmi() &&
- hinstr->value()->representation().IsInteger32()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- // Store int value directly to upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
- representation = Representation::Integer32();
- }
-#endif
-
- if (access.IsInobject()) {
- MemOperand operand = FieldMemOperand(object, offset);
- __ StoreRepresentation(value, operand, representation, r0);
+ if (FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ stfd(value, FieldMemOperand(object, offset));
if (hinstr->NeedsWriteBarrier()) {
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(
- object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
- hinstr->PointersToHereCheckForValue());
+ record_value = ToRegister(instr->value());
}
} else {
- __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- MemOperand operand = FieldMemOperand(scratch, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- if (hinstr->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(
- scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
- hinstr->PointersToHereCheckForValue());
+ if (representation.IsSmi() &&
+ hinstr->value()->representation().IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ // 64-bit Smi optimization
+ // Store int value directly to upper half of the smi.
+ offset = SmiWordOffset(offset);
+ representation = Representation::Integer32();
}
+#endif
+ if (access.IsInobject()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_value = value;
+ } else {
+ Register value = ToRegister(instr->value());
+ __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_dest = scratch;
+ record_value = value;
+ record_scratch = object;
+ }
+#if V8_TARGET_ARCH_PPC64
+ }
+#endif
+
+ if (hinstr->NeedsWriteBarrier()) {
+ __ RecordWriteField(record_dest, offset, record_value, record_scratch,
+ GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
}
}
@@ -4409,7 +4529,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4452,7 +4572,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds");
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -4590,7 +4710,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
}
if (instr->NeedsCanonicalization()) {
- // Force a canonical NaN.
+ // Turn potential sNaN value into qNaN.
__ CanonicalizeNaN(double_scratch, value);
__ stfd(double_scratch, MemOperand(elements, base_offset));
} else {
@@ -4635,11 +4755,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
// Store int value directly to upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
-#if V8_TARGET_LITTLE_ENDIAN
- offset += kPointerSize / 2;
-#endif
+ offset = SmiWordOffset(offset);
}
#endif
@@ -4678,7 +4794,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4725,7 +4841,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, "memento found");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -5040,13 +5156,13 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ TestUnsignedSmiCandidate(input, r0);
- DeoptimizeIf(ne, instr, "overflow", cr0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
}
#if !V8_TARGET_ARCH_PPC64
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, r0);
- DeoptimizeIf(lt, instr, "overflow", cr0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
} else {
#endif
__ SmiTag(output, input);
@@ -5061,11 +5177,10 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, value of scratch won't be zero.
__ andi(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, "not a Smi", cr0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
} else {
__ SmiUntag(result, input);
}
@@ -5095,7 +5210,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ bne(&convert);
} else {
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}
// load heap number
__ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -5111,7 +5226,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ bne(&done);
__ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
#endif
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ b(&done);
if (can_convert_undefined_to_nan) {
@@ -5119,7 +5234,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done);
@@ -5181,10 +5296,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ li(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, "not a heap number", cr7);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
__ lfd(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -5194,7 +5309,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
- DeoptimizeIf(ne, instr, "lost precision or NaN", cr7);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(input_reg, Operand::Zero());
@@ -5203,7 +5318,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
FieldMemOperand(scratch2, HeapNumber::kValueOffset +
Register::kExponentOffset));
__ cmpwi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero", cr7);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
}
__ bind(&done);
@@ -5272,7 +5387,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
@@ -5283,7 +5398,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ MovDoubleHighToInt(scratch1, double_input);
#endif
__ cmpi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -5302,7 +5417,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
@@ -5313,7 +5428,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ MovDoubleHighToInt(scratch1, double_input);
#endif
__ cmpi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -5321,7 +5436,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ SmiTag(result_reg);
#else
__ SmiTagCheckOverflow(result_reg, r0);
- DeoptimizeIf(lt, instr, "overflow", cr0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
}
@@ -5329,7 +5444,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(ne, instr, "not a Smi", cr0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
}
@@ -5337,7 +5452,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(eq, instr, "Smi", cr0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
}
}
@@ -5358,13 +5473,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
} else {
- DeoptimizeIf(lt, instr, "wrong instance type");
+ DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpli(scratch, Operand(last));
- DeoptimizeIf(gt, instr, "wrong instance type");
+ DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
}
}
} else {
@@ -5375,11 +5490,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ andi(r0, scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", cr0);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
+ cr0);
} else {
__ andi(scratch, scratch, Operand(mask));
__ cmpi(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -5398,7 +5514,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmpi(reg, Operand(object), r0);
}
- DeoptimizeIf(ne, instr, "value mismatch");
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}
@@ -5413,7 +5529,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r3, scratch0());
}
__ TestIfSmi(scratch0(), r0);
- DeoptimizeIf(eq, instr, "instance migration failed", cr0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
}
@@ -5471,7 +5587,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ bne(deferred->entry());
} else {
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
__ bind(&success);
@@ -5510,7 +5626,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
- DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ li(result_reg, Operand::Zero());
__ b(&done);
@@ -5599,7 +5715,6 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ bind(deferred->exit());
if (instr->hydrogen()->MustPrefillWithFiller()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ LoadIntLiteral(scratch, size - kHeapObjectTag);
@@ -5723,7 +5838,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ mov(r5, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -5982,19 +6097,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r3, ip);
- DeoptimizeIf(eq, instr, "undefined");
+ DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
Register null_value = r8;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r3, null_value);
- DeoptimizeIf(eq, instr, "null");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNull);
__ TestIfSmi(r3, r0);
- DeoptimizeIf(eq, instr, "Smi", cr0);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, "wrong instance type");
+ DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -6010,7 +6125,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r4, ip);
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
__ bind(&use_cache);
}
@@ -6030,7 +6145,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, "no cache");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
__ bind(&done);
}
@@ -6041,7 +6156,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.h b/deps/v8/src/ppc/lithium-codegen-ppc.h
index 8ae3b3c5d3..7da125396e 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.h
@@ -135,7 +135,7 @@ class LCodeGen : public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
@@ -190,13 +190,11 @@ class LCodeGen : public LCodeGenBase {
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
- enum R4State { R4_UNINITIALIZED, R4_CONTAINS_TARGET };
-
// Generate a direct call to a known function. Expects the function
// to be in r4.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr, R4State r4_state);
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -204,10 +202,10 @@ class LCodeGen : public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, Deoptimizer::BailoutType bailout_type,
- CRegister cr = cr7);
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail, CRegister cr = cr7);
+ Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
index 42470c53a0..d54c7ec46a 100644
--- a/deps/v8/src/ppc/lithium-ppc.cc
+++ b/deps/v8/src/ppc/lithium-ppc.cc
@@ -268,6 +268,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -1104,9 +1118,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
@@ -1241,7 +1263,15 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r4);
- LCallFunction* call = new (zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(r6);
+ vector = FixedTemp(r5);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, r3), instr);
}
@@ -1399,8 +1429,15 @@ LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
- LFlooringDivI* div = new (zone()) LFlooringDivI(dividend, divisor);
- return AssignEnvironment(DefineAsRegister(div));
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1490,9 +1527,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ int32_t constant_value = 0;
if (right->IsConstant()) {
HConstant* constant = HConstant::cast(right);
- int32_t constant_value = constant->Integer32Value();
+ constant_value = constant->Integer32Value();
// Constants -1, 0 and 1 can be optimized if the result can overflow.
// For other constants, it can be optimized only without overflow.
if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
@@ -1515,34 +1553,15 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
right_op = UseRegister(right);
}
LMulI* mul = new (zone()) LMulI(left_op, right_op);
- if (can_overflow || bailout_on_minus_zero) {
+ if (right_op->IsConstantOperand()
+ ? ((can_overflow && constant_value == -1) ||
+ (bailout_on_minus_zero && constant_value <= 0))
+ : (can_overflow || bailout_on_minus_zero)) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- if (instr->HasOneUse() &&
- (instr->uses().value()->IsAdd() || instr->uses().value()->IsSub())) {
- HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
-
- if (use->IsAdd() && instr == use->left()) {
- // This mul is the lhs of an add. The add and mul will be folded into a
- // multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsAdd() &&
- !(use->left()->IsMul() && use->left()->HasOneUse())) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->left() && use->IsSub()) {
- // This mul is the lhs of a sub. The mul and sub will be folded into a
- // multiply-sub in DoSub.
- return NULL;
- }
- }
-
return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
@@ -1570,10 +1589,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
- return DoMultiplySub(instr->right(), HMul::cast(instr->left()));
- }
-
return DoArithmeticD(Token::SUB, instr);
} else {
return DoArithmeticT(Token::SUB, instr);
@@ -1638,15 +1653,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* result = DefineAsRegister(add);
return result;
} else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
- }
-
- if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
- DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
-
return DoArithmeticD(Token::ADD, instr);
} else {
return DoArithmeticT(Token::ADD, instr);
@@ -2103,7 +2109,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@@ -2163,7 +2169,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@@ -2230,7 +2236,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
index 2176fa66c5..ac7b505b98 100644
--- a/deps/v8/src/ppc/lithium-ppc.h
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -285,7 +285,7 @@ class LTemplateResultInstruction : public LInstruction {
STATIC_ASSERT(R == 0 || R == 1);
bool HasResult() const FINAL { return R != 0 && result() != NULL; }
void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const { return results_[0]; }
+ LOperand* result() const OVERRIDE { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@@ -466,25 +466,30 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
+
class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
@@ -1309,6 +1314,7 @@ class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
DECLARE_HYDROGEN_ACCESSOR(Constant)
double value() const { return hydrogen()->DoubleValue(); }
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
};
@@ -1745,7 +1751,7 @@ class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
LOperand* function() { return inputs_[0]; }
LOperand* code_object() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -1822,9 +1828,10 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
const CallInterfaceDescriptor descriptor() { return descriptor_; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
void PrintDataTo(StringStream* stream) OVERRIDE;
@@ -1861,20 +1868,26 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
int arity() const { return hydrogen()->argument_count() - 1; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2130,7 +2143,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2189,7 +2202,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 0b3d72945f..2c9f7aa7a9 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -613,26 +613,8 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
- Label done;
-
- // Test for NaN
- fcmpu(src, src);
-
- if (dst.is(src)) {
- bordered(&done);
- } else {
- Label is_nan;
- bunordered(&is_nan);
- fmr(dst, src);
- b(&done);
- bind(&is_nan);
- }
-
- // Replace with canonical NaN.
- double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- LoadDoubleLiteral(dst, nan_value, r0);
-
- bind(&done);
+ // Turn potential sNaN into qNaN.
+ fadd(dst, src, kDoubleRegZero);
}
@@ -915,7 +897,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context) {
+ bool restore_context,
+ bool argument_count_is_length) {
#if V8_OOL_CONSTANT_POOL
ConstantPoolUnavailableScope constant_pool_unavailable(this);
#endif
@@ -948,7 +931,9 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
LeaveFrame(StackFrame::EXIT);
if (argument_count.is_valid()) {
- ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
+ if (!argument_count_is_length) {
+ ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
+ }
add(sp, sp, argument_count);
}
}
@@ -1515,7 +1500,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
and_(r0, t1, ip, SetRC);
bne(miss, cr0);
@@ -1742,9 +1727,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- mov(r0, Operand(~kHeapObjectTagMask));
- and_(object, object, r0);
-// was.. and_(object, object, Operand(~kHeapObjectTagMask));
+ ClearRightImm(object, object, Operand(kHeapObjectTagSize));
#ifdef DEBUG
// Check that the object un-allocated is below the current top.
mov(scratch, Operand(new_space_allocation_top));
@@ -1942,7 +1925,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
DONT_DO_SMI_CHECK);
lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Force a canonical NaN.
+ // Double value, turn potential sNaN into qNaN.
CanonicalizeNaN(double_scratch);
b(&store);
@@ -1967,23 +1950,26 @@ void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
DCHECK(!overflow_dst.is(left));
DCHECK(!overflow_dst.is(right));
+ bool left_is_right = left.is(right);
+ RCBit xorRC = left_is_right ? SetRC : LeaveRC;
+
// C = A+B; C overflows if A/B have same sign and C has diff sign than A
if (dst.is(left)) {
mr(scratch, left); // Preserve left.
add(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
+ xor_(overflow_dst, dst, scratch, xorRC); // Original left.
+ if (!left_is_right) xor_(scratch, dst, right);
} else if (dst.is(right)) {
mr(scratch, right); // Preserve right.
add(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
+ xor_(overflow_dst, dst, left, xorRC);
+ if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
} else {
add(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
+ xor_(overflow_dst, dst, left, xorRC);
+ if (!left_is_right) xor_(scratch, dst, right);
}
- and_(overflow_dst, scratch, overflow_dst, SetRC);
+ if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
}
@@ -2085,22 +2071,42 @@ void MacroAssembler::CheckMap(Register obj, Register scratch,
}
-void MacroAssembler::DispatchMap(Register obj, Register scratch,
- Handle<Map> map, Handle<Code> success,
- SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+ Register scratch2, Handle<WeakCell> cell,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
Label fail;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, &fail);
}
- LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- mov(r0, Operand(map));
- cmp(scratch, r0);
- bne(&fail);
- Jump(success, RelocInfo::CODE_TARGET, al);
+ LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CmpWeakValue(scratch1, cell, scratch2);
+ Jump(success, RelocInfo::CODE_TARGET, eq);
bind(&fail);
}
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+ Register scratch, CRegister cr) {
+ mov(scratch, Operand(cell));
+ LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
+ cmp(value, scratch, cr);
+}
+
+
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
+ mov(value, Operand(cell));
+ LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
+ JumpIfSmi(value, miss);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function) {
@@ -2177,138 +2183,6 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
}
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address, ExternalReference thunk_ref, int stack_space,
- MemOperand return_value_operand, MemOperand* context_restore_operand) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()), next_address);
-
- DCHECK(function_address.is(r4) || function_address.is(r5));
- Register scratch = r6;
-
- Label profiler_disabled;
- Label end_profiler_check;
- mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
- lbz(scratch, MemOperand(scratch, 0));
- cmpi(scratch, Operand::Zero());
- beq(&profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- mov(scratch, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- mr(scratch, function_address);
- bind(&end_profiler_check);
-
- // Allocate HandleScope in callee-save registers.
- // r17 - next_address
- // r14 - next_address->kNextOffset
- // r15 - next_address->kLimitOffset
- // r16 - next_address->kLevelOffset
- mov(r17, Operand(next_address));
- LoadP(r14, MemOperand(r17, kNextOffset));
- LoadP(r15, MemOperand(r17, kLimitOffset));
- lwz(r16, MemOperand(r17, kLevelOffset));
- addi(r16, r16, Operand(1));
- stw(r16, MemOperand(r17, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, r3);
- mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(this, scratch);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, r3);
- mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // load value from ReturnValue
- LoadP(r3, return_value_operand);
- bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- StoreP(r14, MemOperand(r17, kNextOffset));
- if (emit_debug_code()) {
- lwz(r4, MemOperand(r17, kLevelOffset));
- cmp(r4, r16);
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
- }
- subi(r16, r16, Operand(1));
- stw(r16, MemOperand(r17, kLevelOffset));
- LoadP(r0, MemOperand(r17, kLimitOffset));
- cmp(r15, r0);
- bne(&delete_allocated_handles);
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(r14, Heap::kTheHoleValueRootIndex);
- mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate())));
- LoadP(r15, MemOperand(r15));
- cmp(r14, r15);
- bne(&promote_scheduled_exception);
- bind(&exception_handled);
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- LoadP(cp, *context_restore_operand);
- }
- // LeaveExitFrame expects unwind space to be in a register.
- mov(r14, Operand(stack_space));
- LeaveExitFrame(false, r14, !restore_context);
- blr();
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- StoreP(r15, MemOperand(r17, kLimitOffset));
- mr(r14, r3);
- PrepareCallCFunction(1, r15);
- mov(r3, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
- 1);
- mr(r3, r14);
- b(&leave_exit_frame);
-}
-
-
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2352,7 +2226,7 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
result, double_scratch);
#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, scratch, r0);
+ TestIfInt32(result, r0);
#else
TestIfInt32(scratch, result, r0);
#endif
@@ -2389,7 +2263,7 @@ void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, scratch, r0);
+ TestIfInt32(result, r0);
#else
TestIfInt32(scratch, result, r0);
#endif
@@ -2409,7 +2283,9 @@ void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister double_scratch = kScratchDoubleReg;
+#if !V8_TARGET_ARCH_PPC64
Register scratch = ip;
+#endif
ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
@@ -2419,7 +2295,7 @@ void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
// Test for overflow
#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, scratch, r0);
+ TestIfInt32(result, r0);
#else
TestIfInt32(scratch, result, r0);
#endif
@@ -2733,8 +2609,8 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
LoadP(scratch,
MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(scratch, FieldMemOperand(scratch, offset));
- cmp(map_in_out, scratch);
+ LoadP(ip, FieldMemOperand(scratch, offset));
+ cmp(map_in_out, ip);
bne(no_map_match);
// Use the transitioned cached map.
@@ -2820,7 +2696,6 @@ void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
Label* on_not_both_smi) {
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
orx(r0, reg1, reg2, LeaveRC);
JumpIfNotSmi(r0, on_not_both_smi);
}
@@ -2829,8 +2704,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- TestBit(src, 0, r0);
+ TestBitRange(src, kSmiTagSize - 1, 0, r0);
SmiUntag(dst, src);
beq(smi_case, cr0);
}
@@ -2839,8 +2713,7 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- TestBit(src, 0, r0);
+ TestBitRange(src, kSmiTagSize - 1, 0, r0);
SmiUntag(dst, src);
bne(non_smi_case, cr0);
}
@@ -3693,17 +3566,6 @@ void MacroAssembler::CheckPageFlag(
}
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
- Label* if_deprecated) {
- if (map->CanBeDeprecated()) {
- mov(scratch, Operand(map));
- lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
- ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
- bne(if_deprecated, cr0);
- }
-}
-
-
void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
Register scratch1, Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
@@ -3896,28 +3758,38 @@ void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
// if input_value > 255, output_value is 255
// otherwise output_value is the input_value
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- Label done, negative_label, overflow_label;
int satval = (1 << 8) - 1;
- cmpi(input_reg, Operand::Zero());
- blt(&negative_label);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ // set to 0 if negative
+ cmpi(input_reg, Operand::Zero());
+ isel(lt, output_reg, r0, input_reg);
- cmpi(input_reg, Operand(satval));
- bgt(&overflow_label);
- if (!output_reg.is(input_reg)) {
- mr(output_reg, input_reg);
- }
- b(&done);
-
- bind(&negative_label);
- li(output_reg, Operand::Zero()); // set to 0 if negative
- b(&done);
+ // set to satval if > satval
+ li(r0, Operand(satval));
+ cmpi(output_reg, Operand(satval));
+ isel(lt, output_reg, output_reg, r0);
+ } else {
+ Label done, negative_label, overflow_label;
+ cmpi(input_reg, Operand::Zero());
+ blt(&negative_label);
+
+ cmpi(input_reg, Operand(satval));
+ bgt(&overflow_label);
+ if (!output_reg.is(input_reg)) {
+ mr(output_reg, input_reg);
+ }
+ b(&done);
+ bind(&negative_label);
+ li(output_reg, Operand::Zero()); // set to 0 if negative
+ b(&done);
- bind(&overflow_label); // set to satval if > satval
- li(output_reg, Operand(satval));
+ bind(&overflow_label); // set to satval if > satval
+ li(output_reg, Operand(satval));
- bind(&done);
+ bind(&done);
+ }
}
@@ -3982,6 +3854,20 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ LoadP(dst,
+ FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ const int getterOffset = AccessorPair::kGetterOffset;
+ const int setterOffset = AccessorPair::kSetterOffset;
+ int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
+ LoadP(dst, FieldMemOperand(dst, offset));
+}
+
+
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Register empty_fixed_array_value = r9;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -4422,9 +4308,10 @@ void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
- if (!scratch.is(no_reg) && !is_int16(offset)) {
+ if (!is_int16(offset)) {
/* cannot use d-form */
- LoadIntLiteral(scratch, offset);
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
ldx(dst, MemOperand(mem.ra(), scratch));
#else
@@ -4454,9 +4341,10 @@ void MacroAssembler::StoreP(Register src, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
- if (!scratch.is(no_reg) && !is_int16(offset)) {
+ if (!is_int16(offset)) {
/* cannot use d-form */
- LoadIntLiteral(scratch, offset);
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
stdx(src, MemOperand(mem.ra(), scratch));
#else
@@ -4489,15 +4377,10 @@ void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
- if (!scratch.is(no_reg) && !is_int16(offset)) {
- /* cannot use d-form */
- LoadIntLiteral(scratch, offset);
-#if V8_TARGET_ARCH_PPC64
- // lwax(dst, MemOperand(mem.ra(), scratch));
- DCHECK(0); // lwax not yet implemented
-#else
- lwzx(dst, MemOperand(mem.ra(), scratch));
-#endif
+ if (!is_int16(offset)) {
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
+ lwax(dst, MemOperand(mem.ra(), scratch));
} else {
#if V8_TARGET_ARCH_PPC64
int misaligned = (offset & 3);
@@ -4549,6 +4432,20 @@ void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
}
+void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
+ lhax(dst, MemOperand(mem.ra(), scratch));
+ } else {
+ lha(dst, mem);
+ }
+}
+
+
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
@@ -4622,13 +4519,12 @@ void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
} else if (r.IsUInteger8()) {
LoadByte(dst, mem, scratch);
} else if (r.IsInteger16()) {
- LoadHalfWord(dst, mem, scratch);
- extsh(dst, dst);
+ LoadHalfWordArith(dst, mem, scratch);
} else if (r.IsUInteger16()) {
LoadHalfWord(dst, mem, scratch);
#if V8_TARGET_ARCH_PPC64
} else if (r.IsInteger32()) {
- LoadWord(dst, mem, scratch);
+ LoadWordArith(dst, mem, scratch);
#endif
} else {
LoadP(dst, mem, scratch);
@@ -4658,6 +4554,34 @@ void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
}
+void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ lfdx(dst, MemOperand(base, scratch));
+ } else {
+ lfd(dst, mem);
+ }
+}
+
+
+void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ stfdx(src, MemOperand(base, scratch));
+ } else {
+ stfd(src, mem);
+ }
+}
+
+
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 8f1aeab09f..146489d131 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -177,9 +177,6 @@ class MacroAssembler : public Assembler {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
- void CheckMapDeprecated(Handle<Map> map, Register scratch,
- Label* if_deprecated);
-
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
@@ -389,7 +386,8 @@ class MacroAssembler : public Assembler {
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context);
+ bool restore_context,
+ bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -433,26 +431,26 @@ class MacroAssembler : public Assembler {
void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
-
void LoadWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
-
void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
-
+ void LoadHalfWordArith(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
void LoadByte(Register dst, const MemOperand& mem, Register scratch);
-
void StoreByte(Register src, const MemOperand& mem, Register scratch);
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
Register scratch = no_reg);
-
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
+ void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
+ void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
+
// Move values between integer and floating point registers.
void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
@@ -762,12 +760,22 @@ class MacroAssembler : public Assembler {
Label* fail, SmiCheckType smi_check_type);
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj, Register scratch, Handle<Map> map,
- Handle<Code> success, SmiCheckType smi_check_type);
+ // Check if the map of an object is equal to a specified weak map and branch
+ // to a specified target if equal. Skip the smi check if not required
+ // (object is known to be a heap object)
+ void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+ Handle<WeakCell> cell, Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Compare the given value and the value of weak cell.
+ void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
+ CRegister cr = cr7);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
+ // Load the value of the weak cell in the value register. Branch to the given
+ // miss label if the weak cell was cleared.
+ void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
@@ -781,7 +789,7 @@ class MacroAssembler : public Assembler {
LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
andi(r0, type, Operand(kIsNotStringMask));
- DCHECK_EQ(0, kStringTag);
+ DCHECK_EQ(0u, kStringTag);
return eq;
}
@@ -965,15 +973,6 @@ class MacroAssembler : public Assembler {
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref, int stack_space,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1123,7 +1122,7 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Smi utilities
- // Shift left by 1
+ // Shift left by kSmiShift
void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
@@ -1137,6 +1136,7 @@ class MacroAssembler : public Assembler {
inline void JumpIfNotSmiCandidate(Register value, Register scratch,
Label* not_smi_label) {
// High bits must be identical to fit into an Smi
+ STATIC_ASSERT(kSmiShift == 1);
addis(scratch, value, Operand(0x40000000u >> 16));
cmpi(scratch, Operand::Zero());
blt(not_smi_label);
@@ -1235,16 +1235,15 @@ class MacroAssembler : public Assembler {
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
inline void TestIfSmi(Register value, Register scratch) {
- TestBit(value, 0, scratch); // tst(value, Operand(kSmiTagMask));
+ TestBitRange(value, kSmiTagSize - 1, 0, scratch);
}
inline void TestIfPositiveSmi(Register value, Register scratch) {
- STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
- (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
#if V8_TARGET_ARCH_PPC64
- rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC);
+ rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
#else
- rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC);
+ rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
+ kBitsPerPointer - 1, SetRC);
#endif
}
@@ -1269,12 +1268,11 @@ class MacroAssembler : public Assembler {
#if V8_TARGET_ARCH_PPC64
- inline void TestIfInt32(Register value, Register scratch1, Register scratch2,
+ inline void TestIfInt32(Register value, Register scratch,
CRegister cr = cr7) {
// High bits must be identical to fit into an 32-bit integer
- srawi(scratch1, value, 31);
- sradi(scratch2, value, 32);
- cmp(scratch1, scratch2, cr);
+ extsw(scratch, value);
+ cmp(scratch, value, cr);
}
#else
inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
@@ -1285,6 +1283,18 @@ class MacroAssembler : public Assembler {
}
#endif
+#if V8_TARGET_ARCH_PPC64
+ // Ensure it is permissable to read/write int value directly from
+ // upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#endif
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#else
+#define SmiWordOffset(offset) offset
+#endif
+
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
@@ -1369,15 +1379,18 @@ class MacroAssembler : public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template <typename Field>
- void DecodeField(Register dst, Register src) {
- ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+ void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
+ ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
+ rc);
}
template <typename Field>
- void DecodeField(Register reg) {
- DecodeField<Field>(reg, reg);
+ void DecodeField(Register reg, RCBit rc = LeaveRC) {
+ DecodeField<Field>(reg, reg, rc);
}
template <typename Field>
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
index 54acce16fb..5772724b92 100644
--- a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
@@ -98,11 +98,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -115,9 +115,7 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode,
DCHECK_EQ(0, registers_to_save % 2);
// Called from C
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
__ b(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/ppc/regexp-macro-assembler-ppc.h
index 1f9c3a0f38..0dd292b317 100644
--- a/deps/v8/src/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/regexp-macro-assembler-ppc.h
@@ -16,7 +16,8 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerPPC(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerPPC();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 0d10153790..0bb2da05ff 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -37,7 +37,6 @@ class PPCDebugger {
~PPCDebugger();
void Stop(Instruction* instr);
- void Info(Instruction* instr);
void Debug();
private:
@@ -132,15 +131,6 @@ void PPCDebugger::Stop(Instruction* instr) {
#endif
-void PPCDebugger::Info(Instruction* instr) {
- // Retrieve the encoded address immediately following the Info breakpoint.
- char* msg =
- *reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
- PrintF("Simulator info %s\n", msg);
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
-}
-
-
intptr_t PPCDebugger::GetRegisterValue(int regnum) {
return sim_->get_register(regnum);
}
@@ -989,7 +979,9 @@ void Simulator::GetFpArgs(double* x, double* y, intptr_t* z) {
// The return value is in d1.
-void Simulator::SetFpResult(const double& result) { fp_registers_[1] = result; }
+void Simulator::SetFpResult(const double& result) {
+ set_d_register_from_double(1, result);
+}
void Simulator::TrashCallerSaveRegisters() {
@@ -1148,31 +1140,43 @@ bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
}
-#if !V8_TARGET_ARCH_PPC64
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r4 result register contains a bogus
-// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
-#else
-// For 64-bit, we need to be more explicit.
-typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+#if V8_TARGET_ARCH_PPC64
struct ObjectPair {
intptr_t x;
intptr_t y;
};
-typedef struct ObjectPair (*SimulatorRuntimeObjectPairCall)(
- intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
- intptr_t arg5);
+
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
+ *x = pair->x;
+ *y = pair->y;
+}
+#else
+typedef uint64_t ObjectPair;
+
+
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
+#if V8_TARGET_BIG_ENDIAN
+ *x = static_cast<int32_t>(*pair >> 32);
+ *y = static_cast<int32_t>(*pair);
+#else
+ *x = static_cast<int32_t>(*pair);
+ *y = static_cast<int32_t>(*pair >> 32);
+#endif
+}
#endif
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in
+// runtime.cc uses the ObjectPair which is essentially two pointer
+// values stuffed into a structure. With the code below we assume that
+// all runtime calls return this pair. If they don't, the r4 result
+// register contains a bogus value, which is fine because it is
+// caller-saved.
+typedef ObjectPair (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+
// These prototypes handle the four types of FP calls.
typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -1203,7 +1207,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
Redirection* redirection = Redirection::FromSwiInstruction(instr);
const int kArgCount = 6;
int arg0_regnum = 3;
-#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
intptr_t result_buffer = 0;
if (redirection->type() == ExternalReference::BUILTIN_OBJECTPAIR_CALL) {
result_buffer = get_register(r3);
@@ -1396,56 +1400,19 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
-#if !V8_TARGET_ARCH_PPC64
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
-#if V8_TARGET_BIG_ENDIAN
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", hi_res);
- }
- set_register(r3, hi_res);
- set_register(r4, lo_res);
-#else
+ ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t x;
+ intptr_t y;
+ decodeObjectPair(&result, &x, &y);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(r3, lo_res);
- set_register(r4, hi_res);
-#endif
-#else
- if (redirection->type() == ExternalReference::BUILTIN_CALL) {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08" V8PRIxPTR "\n", result);
- }
- set_register(r3, result);
- } else {
- DCHECK(redirection->type() ==
- ExternalReference::BUILTIN_OBJECTPAIR_CALL);
- SimulatorRuntimeObjectPairCall target =
- reinterpret_cast<SimulatorRuntimeObjectPairCall>(external);
- struct ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08" V8PRIxPTR ", %08" V8PRIxPTR "\n", result.x,
- result.y);
- }
-#if ABI_RETURNS_OBJECT_PAIRS_IN_REGS
- set_register(r3, result.x);
- set_register(r4, result.y);
-#else
- memcpy(reinterpret_cast<void*>(result_buffer), &result,
- sizeof(struct ObjectPair));
-#endif
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
}
-#endif
+ set_register(r3, x);
+ set_register(r4, y);
}
set_pc(saved_lr);
break;
@@ -1455,11 +1422,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
dbg.Debug();
break;
}
- case kInfo: {
- PPCDebugger dbg(this);
- dbg.Info(instr);
- break;
- }
// stop uses all codes greater than 1 << 23.
default: {
if (svc >= (1 << 23)) {
@@ -1830,8 +1792,8 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + rb_val));
- set_d_register_from_double(frt, *dptr);
+ int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + rb_val));
+ set_d_register(frt, *dptr);
if (opcode == LFDUX) {
DCHECK(ra != 0);
set_register(ra, ra_val + rb_val);
@@ -1861,9 +1823,8 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- double frs_val = get_double_from_d_register(frs);
- int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
- WriteDW(ra_val + rb_val, *p);
+ int64_t frs_val = get_d_register(frs);
+ WriteDW(ra_val + rb_val, frs_val);
if (opcode == STFDUX) {
DCHECK(ra != 0);
set_register(ra, ra_val + rb_val);
@@ -2015,7 +1976,20 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
if (instr->Bit(0)) { // RC bit set
SetCR0(static_cast<intptr_t>(alu_out));
}
- // todo - handle OE bit
+ break;
+ }
+ case MULHWUX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint32_t ra_val = (get_register(ra) & 0xFFFFFFFF);
+ uint32_t rb_val = (get_register(rb) & 0xFFFFFFFF);
+ uint64_t alu_out = (uint64_t)ra_val * (uint64_t)rb_val;
+ alu_out >>= 32;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(static_cast<intptr_t>(alu_out));
+ }
break;
}
case NEGX: {
@@ -2074,18 +2048,16 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
DCHECK(!instr->Bit(0));
int frt = instr->RTValue();
int ra = instr->RAValue();
- double frt_val = get_double_from_d_register(frt);
- int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
- set_register(ra, *p);
+ int64_t frt_val = get_d_register(frt);
+ set_register(ra, frt_val);
break;
}
case MFVSRWZ: {
DCHECK(!instr->Bit(0));
int frt = instr->RTValue();
int ra = instr->RAValue();
- double frt_val = get_double_from_d_register(frt);
- int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
- set_register(ra, static_cast<uint32_t>(*p));
+ int64_t frt_val = get_d_register(frt);
+ set_register(ra, static_cast<uint32_t>(frt_val));
break;
}
case MTVSRD: {
@@ -2093,8 +2065,7 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
int frt = instr->RTValue();
int ra = instr->RAValue();
int64_t ra_val = get_register(ra);
- double* p = reinterpret_cast<double*>(&ra_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, ra_val);
break;
}
case MTVSRWA: {
@@ -2102,8 +2073,7 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
int frt = instr->RTValue();
int ra = instr->RAValue();
int64_t ra_val = static_cast<int32_t>(get_register(ra));
- double* p = reinterpret_cast<double*>(&ra_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, ra_val);
break;
}
case MTVSRWZ: {
@@ -2111,8 +2081,7 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
int frt = instr->RTValue();
int ra = instr->RAValue();
uint64_t ra_val = static_cast<uint32_t>(get_register(ra));
- double* p = reinterpret_cast<double*>(&ra_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, ra_val);
break;
}
#endif
@@ -2126,7 +2095,8 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
}
-void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
+bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
+ bool found = true;
int opcode = instr->Bits(9, 1) << 1;
switch (opcode) {
case CNTLZWX: {
@@ -2344,6 +2314,29 @@ void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
}
break;
}
+ case DIVWU: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint32_t ra_val = get_register(ra);
+ uint32_t rb_val = get_register(rb);
+ bool overflow = (rb_val == 0);
+ // result is undefined if divisor is zero
+ uint32_t alu_out = (overflow) ? -1 : ra_val / rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(10)) { // OE bit set
+ if (overflow) {
+ special_reg_xer_ |= 0xC0000000; // set SO,OV
+ } else {
+ special_reg_xer_ &= ~0x40000000; // clear OV
+ }
+ }
+ if (instr->Bit(0)) { // RC bit set
+ bool setSO = (special_reg_xer_ & 0x80000000);
+ SetCR0(alu_out, setSO);
+ }
+ break;
+ }
#if V8_TARGET_ARCH_PPC64
case DIVD: {
int rt = instr->RTValue();
@@ -2366,6 +2359,21 @@ void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
// todo - handle OE bit
break;
}
+ case DIVDU: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint64_t ra_val = get_register(ra);
+ uint64_t rb_val = get_register(rb);
+ // result is undefined if divisor is zero
+ uint64_t alu_out = (rb_val == 0) ? -1 : ra_val / rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
#endif
case ADDX: {
int rt = instr->RTValue();
@@ -2408,6 +2416,19 @@ void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
}
break;
}
+ case ORC: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = rs_val | ~rb_val;
+ set_register(ra, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ break;
+ }
case MFSPR: {
int rt = instr->RTValue();
int spr = instr->Bits(20, 11);
@@ -2497,6 +2518,15 @@ void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
break;
}
#if V8_TARGET_ARCH_PPC64
+ case LWAX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadW(ra_val + rb_val, instr));
+ break;
+ }
case LDX:
case LDUX: {
int rt = instr->RTValue();
@@ -2556,11 +2586,50 @@ void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
}
break;
}
+ case LHAX:
+ case LHAUX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadH(ra_val + rb_val, instr));
+ if (opcode == LHAUX) {
+ DCHECK(ra != 0 && ra != rt);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
case DCBF: {
// todo - simulate dcbf
break;
}
default: {
+ found = false;
+ break;
+ }
+ }
+
+ return found;
+}
+
+
+void Simulator::ExecuteExt2_5bit(Instruction* instr) {
+ int opcode = instr->Bits(5, 1) << 1;
+ switch (opcode) {
+ case ISEL: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int condition_bit = instr->RCValue();
+ int condition_mask = 0x80000000 >> condition_bit;
+ intptr_t ra_val = (ra == 0) ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ intptr_t value = (condition_reg_ & condition_mask) ? ra_val : rb_val;
+ set_register(rt, value);
+ break;
+ }
+ default: {
PrintF("Unimplemented: %08x\n", instr->InstructionBits());
UNIMPLEMENTED(); // Not used by V8.
}
@@ -2573,7 +2642,8 @@ void Simulator::ExecuteExt2(Instruction* instr) {
if (ExecuteExt2_10bit(instr)) return;
// Now look at the lesser encodings
if (ExecuteExt2_9bit_part1(instr)) return;
- ExecuteExt2_9bit_part2(instr);
+ if (ExecuteExt2_9bit_part2(instr)) return;
+ ExecuteExt2_5bit(instr);
}
@@ -2613,7 +2683,7 @@ void Simulator::ExecuteExt4(Instruction* instr) {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- double frt_val = std::sqrt(frb_val);
+ double frt_val = fast_sqrt(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -2690,13 +2760,58 @@ void Simulator::ExecuteExt4(Instruction* instr) {
condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
return;
}
+ case FRIN: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::round(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
+ case FRIZ: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::trunc(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
+ case FRIP: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::ceil(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
+ case FRIM: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::floor(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
case FRSP: {
int frt = instr->RTValue();
int frb = instr->RBValue();
+ // frsp round 8-byte double-precision value to
+ // single-precision value
double frb_val = get_double_from_d_register(frb);
- // frsp round 8-byte double-precision value to 8-byte
- // single-precision value, ignore the round here
- set_d_register_from_double(frt, frb_val);
+ double frt_val = static_cast<float>(frb_val);
+ set_d_register_from_double(frt, frt_val);
if (instr->Bit(0)) { // RC bit set
// UNIMPLEMENTED();
}
@@ -2821,9 +2936,8 @@ void Simulator::ExecuteExt4(Instruction* instr) {
case FMR: {
int frt = instr->RTValue();
int frb = instr->RBValue();
- double frb_val = get_double_from_d_register(frb);
- double frt_val = frb_val;
- set_d_register_from_double(frt, frt_val);
+ int64_t frb_val = get_d_register(frb);
+ set_d_register(frt, frb_val);
return;
}
case MTFSFI: {
@@ -2840,9 +2954,8 @@ void Simulator::ExecuteExt4(Instruction* instr) {
}
case MTFSF: {
int frb = instr->RBValue();
- double frb_dval = get_double_from_d_register(frb);
- int64_t* p = reinterpret_cast<int64_t*>(&frb_dval);
- int32_t frb_ival = static_cast<int32_t>((*p) & 0xffffffff);
+ int64_t frb_dval = get_d_register(frb);
+ int32_t frb_ival = static_cast<int32_t>((frb_dval)&0xffffffff);
int l = instr->Bits(25, 25);
if (l == 1) {
fp_condition_reg_ = frb_ival;
@@ -2859,8 +2972,7 @@ void Simulator::ExecuteExt4(Instruction* instr) {
case MFFS: {
int frt = instr->RTValue();
int64_t lval = static_cast<int64_t>(fp_condition_reg_);
- double* p = reinterpret_cast<double*>(&lval);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, lval);
return;
}
case FABS: {
@@ -2871,16 +2983,6 @@ void Simulator::ExecuteExt4(Instruction* instr) {
set_d_register_from_double(frt, frt_val);
return;
}
- case FRIM: {
- int frt = instr->RTValue();
- int frb = instr->RBValue();
- double frb_val = get_double_from_d_register(frb);
- int64_t floor_val = (int64_t)frb_val;
- if (floor_val > frb_val) floor_val--;
- double frt_val = static_cast<double>(floor_val);
- set_d_register_from_double(frt, frt_val);
- return;
- }
}
UNIMPLEMENTED(); // Not used by V8.
}
@@ -3365,7 +3467,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
set_register(rt, result);
if (opcode == LHZU) {
- DCHECK(ra != 0);
set_register(ra, ra_val + offset);
}
break;
@@ -3373,7 +3474,15 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case LHA:
case LHAU: {
- UNIMPLEMENTED();
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t result = ReadH(ra_val + offset, instr);
+ set_register(rt, result);
+ if (opcode == LHAU) {
+ set_register(ra, ra_val + offset);
+ }
break;
}
@@ -3420,8 +3529,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + offset));
- set_d_register_from_double(frt, *dptr);
+ int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
+ set_d_register(frt, *dptr);
if (opcode == LFDU) {
DCHECK(ra != 0);
set_register(ra, ra_val + offset);
@@ -3451,9 +3560,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- double frs_val = get_double_from_d_register(frs);
- int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
- WriteDW(ra_val + offset, *p);
+ int64_t frs_val = get_d_register(frs);
+ WriteDW(ra_val + offset, frs_val);
if (opcode == STFDU) {
DCHECK(ra != 0);
set_register(ra, ra_val + offset);
@@ -3515,27 +3623,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
#endif
- case FAKE_OPCODE: {
- if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
- int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
- DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
- PrintF("Hit stub-marker: %d (EMIT_STUB_MARKER)\n", marker_code);
- } else {
- int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
- if (fake_opcode == fBKPT) {
- PPCDebugger dbg(this);
- PrintF("Simulator hit BKPT.\n");
- dbg.Debug();
- } else {
- DCHECK(fake_opcode < fLastFaker);
- PrintF("Hit ARM opcode: %d(FAKE_OPCODE defined in constant-ppc.h)\n",
- fake_opcode);
- UNIMPLEMENTED();
- }
- }
- break;
- }
-
default: {
UNIMPLEMENTED();
break;
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 98fe9a5351..cf338ccfdc 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -180,9 +180,20 @@ class Simulator {
double get_double_from_register_pair(int reg);
void set_d_register_from_double(int dreg, const double dbl) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- fp_registers_[dreg] = dbl;
+ *bit_cast<double*>(&fp_registers_[dreg]) = dbl;
+ }
+ double get_double_from_d_register(int dreg) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ return *bit_cast<double*>(&fp_registers_[dreg]);
+ }
+ void set_d_register(int dreg, int64_t value) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ fp_registers_[dreg] = value;
+ }
+ int64_t get_d_register(int dreg) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ return fp_registers_[dreg];
}
- double get_double_from_d_register(int dreg) { return fp_registers_[dreg]; }
// Special case of set_register and get_register to access the raw PC value.
void set_pc(intptr_t value);
@@ -293,7 +304,8 @@ class Simulator {
void ExecuteExt1(Instruction* instr);
bool ExecuteExt2_10bit(Instruction* instr);
bool ExecuteExt2_9bit_part1(Instruction* instr);
- void ExecuteExt2_9bit_part2(Instruction* instr);
+ bool ExecuteExt2_9bit_part2(Instruction* instr);
+ void ExecuteExt2_5bit(Instruction* instr);
void ExecuteExt2(Instruction* instr);
void ExecuteExt4(Instruction* instr);
#if V8_TARGET_ARCH_PPC64
@@ -333,7 +345,7 @@ class Simulator {
intptr_t special_reg_ctr_;
int32_t special_reg_xer_;
- double fp_registers_[kNumFPRs];
+ int64_t fp_registers_[kNumFPRs];
// Simulator support.
char* stack_;
diff --git a/deps/v8/src/preparse-data-format.h b/deps/v8/src/preparse-data-format.h
index 4d1ad7abbf..391a351071 100644
--- a/deps/v8/src/preparse-data-format.h
+++ b/deps/v8/src/preparse-data-format.h
@@ -14,7 +14,7 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 9;
+ static const unsigned kCurrentVersion = 10;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index c101493c9f..a66a1adcf9 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -13,8 +13,7 @@ namespace v8 {
namespace internal {
-CompleteParserRecorder::CompleteParserRecorder()
- : function_store_(0) {
+CompleteParserRecorder::CompleteParserRecorder() {
preamble_[PreparseDataConstants::kMagicOffset] =
PreparseDataConstants::kMagicNumber;
preamble_[PreparseDataConstants::kVersionOffset] =
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index c1331d044f..0d784991c2 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -23,11 +23,9 @@ class ParserRecorder {
virtual ~ParserRecorder() { }
// Logs the scope and some details of a function literal in the source.
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- StrictMode strict_mode) = 0;
+ virtual void LogFunction(int start, int end, int literals, int properties,
+ LanguageMode language_mode,
+ bool uses_super_property) = 0;
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
@@ -50,17 +48,16 @@ class SingletonLogger : public ParserRecorder {
void Reset() { has_error_ = false; }
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- StrictMode strict_mode) {
+ virtual void LogFunction(int start, int end, int literals, int properties,
+ LanguageMode language_mode,
+ bool scope_uses_super_property) {
DCHECK(!has_error_);
start_ = start;
end_ = end;
literals_ = literals;
properties_ = properties;
- strict_mode_ = strict_mode;
+ language_mode_ = language_mode;
+ scope_uses_super_property_ = scope_uses_super_property;
}
// Logs an error message and marks the log as containing an error.
@@ -92,9 +89,13 @@ class SingletonLogger : public ParserRecorder {
DCHECK(!has_error_);
return properties_;
}
- StrictMode strict_mode() const {
+ LanguageMode language_mode() const {
DCHECK(!has_error_);
- return strict_mode_;
+ return language_mode_;
+ }
+ bool scope_uses_super_property() const {
+ DCHECK(!has_error_);
+ return scope_uses_super_property_;
}
int is_reference_error() const { return is_reference_error_; }
const char* message() {
@@ -113,7 +114,8 @@ class SingletonLogger : public ParserRecorder {
// For function entries.
int literals_;
int properties_;
- StrictMode strict_mode_;
+ LanguageMode language_mode_;
+ bool scope_uses_super_property_;
// For error messages.
const char* message_;
const char* argument_opt_;
@@ -131,16 +133,15 @@ class CompleteParserRecorder : public ParserRecorder {
CompleteParserRecorder();
virtual ~CompleteParserRecorder() {}
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- StrictMode strict_mode) {
+ virtual void LogFunction(int start, int end, int literals, int properties,
+ LanguageMode language_mode,
+ bool scope_uses_super_property) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(literals);
function_store_.Add(properties);
- function_store_.Add(strict_mode);
+ function_store_.Add(language_mode);
+ function_store_.Add(scope_uses_super_property);
}
// Logs an error message and marks the log as containing an error.
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index b552676753..154a9ae527 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -80,6 +80,8 @@ PreParserExpression PreParserTraits::ExpressionFromString(
int pos, Scanner* scanner, PreParserFactory* factory) {
if (scanner->UnescapedLiteralMatches("use strict", 10)) {
return PreParserExpression::UseStrictStringLiteral();
+ } else if (scanner->UnescapedLiteralMatches("use strong", 10)) {
+ return PreParserExpression::UseStrongStringLiteral();
}
return PreParserExpression::StringLiteral();
}
@@ -102,18 +104,18 @@ PreParserExpression PreParserTraits::ParseFunctionLiteral(
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- StrictMode strict_mode, bool is_generator, ParserRecorder* log) {
+ LanguageMode language_mode, FunctionKind kind, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
- PreParserScope top_scope(scope_, SCRIPT_SCOPE);
+ Scope* top_scope = NewScope(scope_, SCRIPT_SCOPE);
PreParserFactory top_factory(NULL);
- FunctionState top_state(&function_state_, &scope_, &top_scope, &top_factory);
- scope_->SetStrictMode(strict_mode);
- PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState top_state(&function_state_, &scope_, top_scope, kNormalFunction,
+ &top_factory);
+ scope_->SetLanguageMode(language_mode);
+ Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE);
PreParserFactory function_factory(NULL);
- FunctionState function_state(&function_state_, &scope_, &function_scope,
+ FunctionState function_state(&function_state_, &scope_, function_scope, kind,
&function_factory);
- function_state.set_is_generator(is_generator);
DCHECK_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
@@ -123,7 +125,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
ReportUnexpectedToken(scanner()->current_token());
} else {
DCHECK_EQ(Token::RBRACE, scanner()->peek());
- if (scope_->strict_mode() == STRICT) {
+ if (is_strict(scope_->language_mode())) {
int end_pos = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_pos, &ok);
}
@@ -154,24 +156,23 @@ PreParserExpression PreParserTraits::ParseClassLiteral(
// it is used) are generally omitted.
-#define CHECK_OK ok); \
- if (!*ok) return kUnknownSourceElements; \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-
-PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
+PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
+ // ECMA 262 6th Edition
+ // StatementListItem[Yield, Return] :
+ // Statement[?Yield, ?Return]
+ // Declaration[?Yield]
+ //
+ // Declaration[Yield] :
+ // HoistableDeclaration[?Yield]
+ // ClassDeclaration[?Yield]
+ // LexicalDeclaration[In, ?Yield]
//
- // In harmony mode we allow additionally the following productions
- // SourceElement:
- // LetDeclaration
- // ConstDeclaration
- // GeneratorDeclaration
+ // HoistableDeclaration[Yield, Default] :
+ // FunctionDeclaration[?Yield, ?Default]
+ // GeneratorDeclaration[?Yield, ?Default]
+ //
+ // LexicalDeclaration[In, Yield] :
+ // LetOrConst BindingList[?In, ?Yield] ;
switch (peek()) {
case Token::FUNCTION:
@@ -179,11 +180,11 @@ PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
case Token::CLASS:
return ParseClassDeclaration(ok);
case Token::CONST:
- return ParseVariableStatement(kSourceElement, ok);
+ return ParseVariableStatement(kStatementListItem, ok);
case Token::LET:
DCHECK(allow_harmony_scoping());
- if (strict_mode() == STRICT) {
- return ParseVariableStatement(kSourceElement, ok);
+ if (is_strict(language_mode())) {
+ return ParseVariableStatement(kStatementListItem, ok);
}
// Fall through.
default:
@@ -192,8 +193,7 @@ PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
}
-PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
- bool* ok) {
+void PreParser::ParseStatementList(int end_token, bool* ok) {
// SourceElements ::
// (Statement)* <end_token>
@@ -202,20 +202,23 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
if (directive_prologue && peek() != Token::STRING) {
directive_prologue = false;
}
- Statement statement = ParseSourceElement(CHECK_OK);
+ Statement statement = ParseStatementListItem(ok);
+ if (!*ok) return;
if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- scope_->SetStrictMode(STRICT);
+ scope_->SetLanguageMode(
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
+ } else if (statement.IsUseStrongLiteral() && allow_strong_mode()) {
+ scope_->SetLanguageMode(static_cast<LanguageMode>(
+ scope_->language_mode() | STRICT_BIT | STRONG_BIT));
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
}
}
- return kUnknownSourceElements;
}
-#undef CHECK_OK
#define CHECK_OK ok); \
if (!*ok) return Statement::Default(); \
((void)0
@@ -225,6 +228,14 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
PreParser::Statement PreParser::ParseStatement(bool* ok) {
// Statement ::
+ // EmptyStatement
+ // ...
+ return ParseSubStatement(ok);
+}
+
+
+PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
+ // Statement ::
// Block
// VariableStatement
// EmptyStatement
@@ -254,6 +265,12 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
return ParseBlock(ok);
case Token::SEMICOLON:
+ if (is_strong(language_mode())) {
+ PreParserTraits::ReportMessageAt(scanner()->peek_location(),
+ "strong_empty");
+ *ok = false;
+ return Statement::Default();
+ }
Next();
return Statement::Default();
@@ -294,7 +311,7 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
Scanner::Location end_location = scanner()->location();
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
PreParserTraits::ReportMessageAt(start_location.beg_pos,
end_location.end_pos,
"strict_function");
@@ -305,22 +322,21 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
}
}
- case Token::CLASS:
- return ParseClassDeclaration(CHECK_OK);
-
case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
case Token::VAR:
- case Token::CONST:
return ParseVariableStatement(kStatement, ok);
- case Token::LET:
- DCHECK(allow_harmony_scoping());
- if (strict_mode() == STRICT) {
+ case Token::CONST:
+ // In ES6 CONST is not allowed as a Statement, only as a
+ // LexicalDeclaration, however we continue to allow it in sloppy mode for
+ // backwards compatibility.
+ if (is_sloppy(language_mode())) {
return ParseVariableStatement(kStatement, ok);
}
- // Fall through.
+
+ // Fall through.
default:
return ParseExpressionOrLabelledStatement(ok);
}
@@ -350,7 +366,7 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
Expect(Token::CLASS, CHECK_OK);
- if (!allow_harmony_sloppy() && strict_mode() == SLOPPY) {
+ if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
ReportMessage("sloppy_lexical");
*ok = false;
return Statement::Default();
@@ -375,8 +391,8 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (allow_harmony_scoping() && strict_mode() == STRICT) {
- ParseSourceElement(CHECK_OK);
+ if (allow_harmony_scoping() && is_strict(language_mode())) {
+ ParseStatementListItem(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
}
@@ -427,6 +443,12 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
bool require_initializer = false;
bool is_strict_const = false;
if (peek() == Token::VAR) {
+ if (is_strong(language_mode())) {
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location, "strong_var");
+ *ok = false;
+ return Statement::Default();
+ }
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
// TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
@@ -440,29 +462,20 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- if (strict_mode() == STRICT) {
- if (allow_harmony_scoping()) {
- if (var_context != kSourceElement && var_context != kForStatement) {
- ReportMessageAt(scanner()->peek_location(), "unprotected_const");
- *ok = false;
- return Statement::Default();
- }
- is_strict_const = true;
- require_initializer = var_context != kForStatement;
- } else {
+ if (is_strict(language_mode())) {
+ DCHECK(var_context != kStatement);
+ if (!allow_harmony_scoping()) {
Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location, "strict_const");
*ok = false;
return Statement::Default();
}
+ is_strict_const = true;
+ require_initializer = var_context != kForStatement;
}
- } else if (peek() == Token::LET && strict_mode() == STRICT) {
+ } else if (peek() == Token::LET && is_strict(language_mode())) {
Consume(Token::LET);
- if (var_context != kSourceElement && var_context != kForStatement) {
- ReportMessageAt(scanner()->peek_location(), "unprotected_let");
- *ok = false;
- return Statement::Default();
- }
+ DCHECK(var_context != kStatement);
} else {
*ok = false;
return Statement::Default();
@@ -497,6 +510,22 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Expression ';'
// Identifier ':' Statement
+ switch (peek()) {
+ case Token::FUNCTION:
+ case Token::LBRACE:
+ UNREACHABLE(); // Always handled by the callers.
+ case Token::CLASS:
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return Statement::Default();
+
+ // TODO(arv): Handle `let [`
+ // https://code.google.com/p/v8/issues/detail?id=3847
+
+ default:
+ break;
+ }
+
bool starts_with_identifier = peek_any_identifier();
Expression expr = ParseExpression(true, CHECK_OK);
// Even if the expression starts with an identifier, it is not necessarily an
@@ -506,7 +535,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Expression is a single identifier, and not, e.g., a parenthesized
// identifier.
DCHECK(!expr.AsIdentifier().IsFutureReserved());
- DCHECK(strict_mode() == SLOPPY ||
+ DCHECK(is_sloppy(language_mode()) ||
!IsFutureStrictReserved(expr.AsIdentifier()));
Consume(Token::COLON);
return ParseStatement(ok);
@@ -516,7 +545,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
}
// Parsed expression statement.
// Detect attempts at 'let' declarations in sloppy mode.
- if (peek() == Token::IDENTIFIER && strict_mode() == SLOPPY &&
+ if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
expr.IsIdentifier() && expr.AsIdentifier().IsLet()) {
ReportMessage("sloppy_lexical", NULL);
*ok = false;
@@ -535,10 +564,10 @@ PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- ParseStatement(CHECK_OK);
+ ParseSubStatement(CHECK_OK);
if (peek() == Token::ELSE) {
Next();
- ParseStatement(CHECK_OK);
+ ParseSubStatement(CHECK_OK);
}
return Statement::Default();
}
@@ -610,7 +639,7 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
ReportMessageAt(scanner()->location(), "strict_mode_with");
*ok = false;
return Statement::Default();
@@ -619,9 +648,9 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- PreParserScope with_scope(scope_, WITH_SCOPE);
- BlockState block_state(&scope_, &with_scope);
- ParseStatement(CHECK_OK);
+ Scope* with_scope = NewScope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, with_scope);
+ ParseSubStatement(CHECK_OK);
return Statement::Default();
}
@@ -663,7 +692,7 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
// 'do' Statement 'while' '(' Expression ')' ';'
Expect(Token::DO, CHECK_OK);
- ParseStatement(CHECK_OK);
+ ParseSubStatement(CHECK_OK);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
@@ -681,20 +710,11 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- ParseStatement(ok);
+ ParseSubStatement(ok);
return Statement::Default();
}
-bool PreParser::CheckInOrOf(bool accept_OF) {
- if (Check(Token::IN) ||
- (accept_OF && CheckContextualKeyword(CStrVector("of")))) {
- return true;
- }
- return false;
-}
-
-
PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
@@ -703,10 +723,11 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
bool is_let_identifier_expression = false;
if (peek() != Token::SEMICOLON) {
+ ForEachStatement::VisitMode visit_mode;
if (peek() == Token::VAR || peek() == Token::CONST ||
- (peek() == Token::LET && strict_mode() == STRICT)) {
+ (peek() == Token::LET && is_strict(language_mode()))) {
bool is_lexical = peek() == Token::LET ||
- (peek() == Token::CONST && strict_mode() == STRICT);
+ (peek() == Token::CONST && is_strict(language_mode()));
int decl_count;
VariableDeclarationProperties decl_props = kHasNoInitializers;
ParseVariableDeclarations(
@@ -714,22 +735,22 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
bool has_initializers = decl_props == kHasInitializers;
bool accept_IN = decl_count == 1 && !(is_lexical && has_initializers);
bool accept_OF = !has_initializers;
- if (accept_IN && CheckInOrOf(accept_OF)) {
+ if (accept_IN && CheckInOrOf(accept_OF, &visit_mode, ok)) {
+ if (!*ok) return Statement::Default();
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
-
- ParseStatement(CHECK_OK);
+ ParseSubStatement(CHECK_OK);
return Statement::Default();
}
} else {
Expression lhs = ParseExpression(false, CHECK_OK);
is_let_identifier_expression =
lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
- if (CheckInOrOf(lhs.IsIdentifier())) {
+ if (CheckInOrOf(lhs.IsIdentifier(), &visit_mode, ok)) {
+ if (!*ok) return Statement::Default();
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
-
- ParseStatement(CHECK_OK);
+ ParseSubStatement(CHECK_OK);
return Statement::Default();
}
}
@@ -737,7 +758,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// Parsed initializer at this point.
// Detect attempts at 'let' declarations in sloppy mode.
- if (peek() == Token::IDENTIFIER && strict_mode() == SLOPPY &&
+ if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
is_let_identifier_expression) {
ReportMessage("sloppy_lexical", NULL);
*ok = false;
@@ -755,7 +776,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- ParseStatement(ok);
+ ParseSubStatement(ok);
return Statement::Default();
}
@@ -804,8 +825,8 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
{
- PreParserScope with_scope(scope_, WITH_SCOPE);
- BlockState block_state(&scope_, &with_scope);
+ Scope* with_scope = NewScope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, with_scope);
ParseBlock(CHECK_OK);
}
tok = peek();
@@ -848,12 +869,11 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
- ScopeType outer_scope_type = scope_->type();
- PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ bool outer_is_script_scope = scope_->is_script_scope();
+ Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE);
PreParserFactory factory(NULL);
- FunctionState function_state(&function_state_, &scope_, &function_scope,
+ FunctionState function_state(&function_state_, &scope_, function_scope, kind,
&factory);
- function_state.set_is_generator(IsGeneratorFunction(kind));
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
@@ -866,11 +886,17 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Scanner::Location dupe_error_loc = Scanner::Location::invalid();
Scanner::Location reserved_error_loc = Scanner::Location::invalid();
+ bool is_rest = false;
bool done = arity_restriction == FunctionLiteral::GETTER_ARITY ||
(peek() == Token::RPAREN &&
arity_restriction != FunctionLiteral::SETTER_ARITY);
while (!done) {
bool is_strict_reserved = false;
+ is_rest = peek() == Token::ELLIPSIS && allow_harmony_rest_params();
+ if (is_rest) {
+ Consume(Token::ELLIPSIS);
+ }
+
Identifier param_name =
ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
if (!eval_args_error_loc.IsValid() && param_name.IsEvalOrArguments()) {
@@ -888,54 +914,41 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
if (arity_restriction == FunctionLiteral::SETTER_ARITY) break;
done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
+ if (!done) {
+ if (is_rest) {
+ ReportMessageAt(scanner()->peek_location(), "param_after_rest");
+ *ok = false;
+ return Expression::Default();
+ }
+ Expect(Token::COMMA, CHECK_OK);
+ }
}
Expect(Token::RPAREN, CHECK_OK);
// See Parser::ParseFunctionLiteral for more information about lazy parsing
// and lazy compilation.
- bool is_lazily_parsed = (outer_scope_type == SCRIPT_SCOPE && allow_lazy() &&
- !parenthesized_function_);
+ bool is_lazily_parsed =
+ (outer_is_script_scope && allow_lazy() && !parenthesized_function_);
parenthesized_function_ = false;
Expect(Token::LBRACE, CHECK_OK);
if (is_lazily_parsed) {
ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
- ParseSourceElements(Token::RBRACE, ok);
+ ParseStatementList(Token::RBRACE, CHECK_OK);
}
Expect(Token::RBRACE, CHECK_OK);
- // Validate strict mode. We can do this only after parsing the function,
- // since the function can declare itself strict.
- // Concise methods use StrictFormalParameters.
- if (strict_mode() == STRICT || IsConciseMethod(kind)) {
- if (function_name.IsEvalOrArguments()) {
- ReportMessageAt(function_name_location, "strict_eval_arguments");
- *ok = false;
- return Expression::Default();
- }
- if (name_is_strict_reserved) {
- ReportMessageAt(function_name_location, "unexpected_strict_reserved");
- *ok = false;
- return Expression::Default();
- }
- if (eval_args_error_loc.IsValid()) {
- ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
- *ok = false;
- return Expression::Default();
- }
- if (dupe_error_loc.IsValid()) {
- ReportMessageAt(dupe_error_loc, "strict_param_dupe");
- *ok = false;
- return Expression::Default();
- }
- if (reserved_error_loc.IsValid()) {
- ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved");
- *ok = false;
- return Expression::Default();
- }
+ // Validate name and parameter names. We can do this only after parsing the
+ // function, since the function can declare itself strict.
+ CheckFunctionName(language_mode(), kind, function_name,
+ name_is_strict_reserved, function_name_location, CHECK_OK);
+ const bool use_strict_params = is_rest || IsConciseMethod(kind);
+ CheckFunctionParameterNames(language_mode(), use_strict_params,
+ eval_args_error_loc, dupe_error_loc,
+ reserved_error_loc, CHECK_OK);
+ if (is_strict(language_mode())) {
int end_position = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
@@ -946,7 +959,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
int body_start = position();
- ParseSourceElements(Token::RBRACE, ok);
+ ParseStatementList(Token::RBRACE, ok);
if (!*ok) return;
// Position right after terminal '}'.
@@ -954,8 +967,8 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
function_state_->materialized_literal_count(),
- function_state_->expected_property_count(),
- strict_mode());
+ function_state_->expected_property_count(), language_mode(),
+ scope_->uses_super_property());
}
@@ -974,15 +987,19 @@ PreParserExpression PreParser::ParseClassLiteral(
return EmptyExpression();
}
- PreParserScope scope = NewScope(scope_, BLOCK_SCOPE);
- BlockState block_state(&scope_, &scope);
- scope_->SetStrictMode(STRICT);
- scope_->SetScopeName(name);
+ Scope* scope = NewScope(scope_, BLOCK_SCOPE);
+ BlockState block_state(&scope_, scope);
+ scope_->SetLanguageMode(
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
+ // TODO(marja): Make PreParser use scope names too.
+ // scope_->SetScopeName(name);
- if (Check(Token::EXTENDS)) {
+ bool has_extends = Check(Token::EXTENDS);
+ if (has_extends) {
ParseLeftHandSideExpression(CHECK_OK);
}
+ ClassLiteralChecker checker(this);
bool has_seen_constructor = false;
Expect(Token::LBRACE, CHECK_OK);
@@ -990,8 +1007,10 @@ PreParserExpression PreParser::ParseClassLiteral(
if (Check(Token::SEMICOLON)) continue;
const bool in_class = true;
const bool is_static = false;
- ParsePropertyDefinition(NULL, in_class, is_static, &has_seen_constructor,
- CHECK_OK);
+ bool is_computed_name = false; // Classes do not care about computed
+ // property names here.
+ ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
+ &is_computed_name, &has_seen_constructor, CHECK_OK);
}
Expect(Token::RBRACE, CHECK_OK);
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 5fb60d00bb..f7f532372b 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -42,7 +42,6 @@ namespace internal {
// // Used by FunctionState and BlockState.
// typedef Scope;
// typedef GeneratorVariable;
-// typedef Zone;
// // Return types for traversing functions.
// typedef Identifier;
// typedef Expression;
@@ -68,18 +67,20 @@ class ParserBase : public Traits {
typedef typename Traits::Type::Literal LiteralT;
typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
- ParserBase(Scanner* scanner, uintptr_t stack_limit, v8::Extension* extension,
- ParserRecorder* log, typename Traits::Type::Zone* zone,
- typename Traits::Type::Parser this_object)
+ ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
+ v8::Extension* extension, AstValueFactory* ast_value_factory,
+ ParserRecorder* log, typename Traits::Type::Parser this_object)
: Traits(this_object),
parenthesized_function_(false),
scope_(NULL),
function_state_(NULL),
extension_(extension),
fni_(NULL),
+ ast_value_factory_(ast_value_factory),
log_(log),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
stack_limit_(stack_limit),
+ zone_(zone),
scanner_(scanner),
stack_overflow_(false),
allow_lazy_(false),
@@ -87,7 +88,9 @@ class ParserBase : public Traits {
allow_harmony_arrow_functions_(false),
allow_harmony_object_literals_(false),
allow_harmony_sloppy_(false),
- zone_(zone) {}
+ allow_harmony_computed_property_names_(false),
+ allow_harmony_rest_params_(false),
+ allow_strong_mode_(false) {}
// Getters that indicate whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
@@ -108,6 +111,14 @@ class ParserBase : public Traits {
bool allow_harmony_templates() const { return scanner()->HarmonyTemplates(); }
bool allow_harmony_sloppy() const { return allow_harmony_sloppy_; }
bool allow_harmony_unicode() const { return scanner()->HarmonyUnicode(); }
+ bool allow_harmony_computed_property_names() const {
+ return allow_harmony_computed_property_names_;
+ }
+ bool allow_harmony_rest_params() const {
+ return allow_harmony_rest_params_;
+ }
+
+ bool allow_strong_mode() const { return allow_strong_mode_; }
// Setters that determine whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
@@ -140,6 +151,13 @@ class ParserBase : public Traits {
void set_allow_harmony_unicode(bool allow) {
scanner()->SetHarmonyUnicode(allow);
}
+ void set_allow_harmony_computed_property_names(bool allow) {
+ allow_harmony_computed_property_names_ = allow;
+ }
+ void set_allow_harmony_rest_params(bool allow) {
+ allow_harmony_rest_params_ = allow;
+ }
+ void set_allow_strong_mode(bool allow) { allow_strong_mode_ = allow; }
protected:
enum AllowEvalOrArgumentsAsIdentifier {
@@ -152,8 +170,17 @@ class ParserBase : public Traits {
PARSE_EAGERLY
};
+ enum VariableDeclarationContext {
+ kStatementListItem,
+ kStatement,
+ kForStatement
+ };
+
+ // If a list of variable declarations includes any initializers.
+ enum VariableDeclarationProperties { kHasInitializers, kHasNoInitializers };
+
class Checkpoint;
- class ObjectLiteralChecker;
+ class ObjectLiteralCheckerBase;
// ---------------------------------------------------------------------------
// FunctionState and BlockState together implement the parser's scope stack.
@@ -162,26 +189,22 @@ class ParserBase : public Traits {
// used to hold the parser's per-function and per-block state.
class BlockState BASE_EMBEDDED {
public:
- BlockState(typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope)
- : scope_stack_(scope_stack),
- outer_scope_(*scope_stack),
- scope_(scope) {
+ BlockState(Scope** scope_stack, Scope* scope)
+ : scope_stack_(scope_stack), outer_scope_(*scope_stack), scope_(scope) {
*scope_stack_ = scope_;
}
~BlockState() { *scope_stack_ = outer_scope_; }
private:
- typename Traits::Type::Scope** scope_stack_;
- typename Traits::Type::Scope* outer_scope_;
- typename Traits::Type::Scope* scope_;
+ Scope** scope_stack_;
+ Scope* outer_scope_;
+ Scope* scope_;
};
class FunctionState BASE_EMBEDDED {
public:
- FunctionState(FunctionState** function_state_stack,
- typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope,
+ FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
+ Scope* scope, FunctionKind kind,
typename Traits::Type::Factory* factory);
~FunctionState();
@@ -198,15 +221,16 @@ class ParserBase : public Traits {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
- void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
- bool is_generator() const { return is_generator_; }
+ bool is_generator() const { return IsGeneratorFunction(kind_); }
+
+ FunctionKind kind() const { return kind_; }
+ FunctionState* outer() const { return outer_function_state_; }
void set_generator_object_variable(
typename Traits::Type::GeneratorVariable* variable) {
DCHECK(variable != NULL);
- DCHECK(!is_generator());
+ DCHECK(is_generator());
generator_object_variable_ = variable;
- is_generator_ = true;
}
typename Traits::Type::GeneratorVariable* generator_object_variable()
const {
@@ -227,8 +251,7 @@ class ParserBase : public Traits {
// Properties count estimation.
int expected_property_count_;
- // Whether the function is a generator.
- bool is_generator_;
+ FunctionKind kind_;
// For generators, this variable may hold the generator object. It variable
// is used by yield expressions and return statements. It is not necessary
// for generator functions to have this variable set.
@@ -236,9 +259,8 @@ class ParserBase : public Traits {
FunctionState** function_state_stack_;
FunctionState* outer_function_state_;
- typename Traits::Type::Scope** scope_stack_;
- typename Traits::Type::Scope* outer_scope_;
- typename Traits::Type::Zone* extra_param_;
+ Scope** scope_stack_;
+ Scope* outer_scope_;
typename Traits::Type::Factory* factory_;
friend class ParserTraits;
@@ -289,13 +311,27 @@ class ParserBase : public Traits {
Mode old_mode_;
};
+ Scope* NewScope(Scope* parent, ScopeType scope_type,
+ FunctionKind kind = kNormalFunction) {
+ DCHECK(ast_value_factory());
+ DCHECK(scope_type != MODULE_SCOPE || allow_harmony_modules());
+ DCHECK((scope_type == FUNCTION_SCOPE && IsValidFunctionKind(kind)) ||
+ kind == kNormalFunction);
+ Scope* result =
+ new (zone()) Scope(zone(), parent, scope_type, ast_value_factory());
+ bool uninitialized_this = IsSubclassConstructor(kind);
+ result->Initialize(uninitialized_this);
+ return result;
+ }
+
Scanner* scanner() const { return scanner_; }
+ AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
int position() { return scanner_->location().beg_pos; }
int peek_position() { return scanner_->peek_location().beg_pos; }
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
Mode mode() const { return mode_; }
- typename Traits::Type::Zone* zone() const { return zone_; }
+ Zone* zone() const { return zone_; }
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
@@ -384,6 +420,23 @@ class ParserBase : public Traits {
}
}
+ bool CheckInOrOf(
+ bool accept_OF, ForEachStatement::VisitMode* visit_mode, bool* ok) {
+ if (Check(Token::IN)) {
+ if (is_strong(language_mode())) {
+ ReportMessageAt(scanner()->location(), "strong_for_in");
+ *ok = false;
+ } else {
+ *visit_mode = ForEachStatement::ENUMERATE;
+ }
+ return true;
+ } else if (accept_OF && CheckContextualKeyword(CStrVector("of"))) {
+ *visit_mode = ForEachStatement::ITERATE;
+ return true;
+ }
+ return false;
+ }
+
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// If so, reports an error. Only called for strict mode and template strings.
void CheckOctalLiteral(int beg_pos, int end_pos, const char* error,
@@ -405,17 +458,18 @@ class ParserBase : public Traits {
CheckOctalLiteral(beg_pos, end_pos, "template_octal_literal", ok);
}
- // Validates strict mode for function parameter lists. This has to be
- // done after parsing the function, since the function can declare
- // itself strict.
- void CheckStrictFunctionNameAndParameters(
- IdentifierT function_name,
- bool function_name_is_strict_reserved,
- const Scanner::Location& function_name_loc,
- const Scanner::Location& eval_args_error_loc,
- const Scanner::Location& dupe_error_loc,
- const Scanner::Location& reserved_loc,
- bool* ok) {
+ // Checking the name of a function literal. This has to be done after parsing
+ // the function, since the function can declare itself strict.
+ void CheckFunctionName(LanguageMode language_mode, FunctionKind kind,
+ IdentifierT function_name,
+ bool function_name_is_strict_reserved,
+ const Scanner::Location& function_name_loc,
+ bool* ok) {
+ // Property names are never checked.
+ if (IsConciseMethod(kind) || IsAccessorFunction(kind)) return;
+ // The function name needs to be checked in strict mode.
+ if (is_sloppy(language_mode)) return;
+
if (this->IsEvalOrArguments(function_name)) {
Traits::ReportMessageAt(function_name_loc, "strict_eval_arguments");
*ok = false;
@@ -426,11 +480,25 @@ class ParserBase : public Traits {
*ok = false;
return;
}
- if (eval_args_error_loc.IsValid()) {
+ }
+
+ // Checking the parameter names of a function literal. This has to be done
+ // after parsing the function, since the function can declare itself strict.
+ void CheckFunctionParameterNames(LanguageMode language_mode,
+ bool strict_params,
+ const Scanner::Location& eval_args_error_loc,
+ const Scanner::Location& dupe_error_loc,
+ const Scanner::Location& reserved_loc,
+ bool* ok) {
+ if (is_sloppy(language_mode) && !strict_params) return;
+
+ if (is_strict(language_mode) && eval_args_error_loc.IsValid()) {
Traits::ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
*ok = false;
return;
}
+ // TODO(arv): When we add support for destructuring in setters we also need
+ // to check for duplicate names.
if (dupe_error_loc.IsValid()) {
Traits::ReportMessageAt(dupe_error_loc, "strict_param_dupe");
*ok = false;
@@ -454,7 +522,7 @@ class ParserBase : public Traits {
return function_state_->factory();
}
- StrictMode strict_mode() { return scope_->strict_mode(); }
+ LanguageMode language_mode() { return scope_->language_mode(); }
bool is_generator() const { return function_state_->is_generator(); }
// Report syntax errors.
@@ -499,13 +567,14 @@ class ParserBase : public Traits {
ExpressionT ParsePrimaryExpression(bool* ok);
ExpressionT ParseExpression(bool accept_IN, bool* ok);
ExpressionT ParseArrayLiteral(bool* ok);
- IdentifierT ParsePropertyName(bool* is_get, bool* is_set, bool* is_static,
+ ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
+ bool* is_static, bool* is_computed_name,
bool* ok);
ExpressionT ParseObjectLiteral(bool* ok);
- ObjectLiteralPropertyT ParsePropertyDefinition(ObjectLiteralChecker* checker,
- bool in_class, bool is_static,
- bool* has_seen_constructor,
- bool* ok);
+ ObjectLiteralPropertyT ParsePropertyDefinition(
+ ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
+ bool is_static, bool* is_computed_name, bool* has_seen_constructor,
+ bool* ok);
typename Traits::Type::ExpressionList ParseArguments(bool* ok);
ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
ExpressionT ParseYieldExpression(bool* ok);
@@ -522,6 +591,7 @@ class ParserBase : public Traits {
bool* ok);
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool* ok);
void AddTemplateExpression(ExpressionT);
+ ExpressionT ParseSuperExpression(bool is_new, bool* ok);
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
@@ -530,58 +600,63 @@ class ParserBase : public Traits {
ExpressionT expression,
Scanner::Location location, const char* message, bool* ok);
- // Used to detect duplicates in object literals. Each of the values
- // kGetterProperty, kSetterProperty and kValueProperty represents
- // a type of object literal property. When parsing a property, its
- // type value is stored in the DuplicateFinder for the property name.
- // Values are chosen so that having intersection bits means the there is
- // an incompatibility.
- // I.e., you can add a getter to a property that already has a setter, since
- // kGetterProperty and kSetterProperty doesn't intersect, but not if it
- // already has a getter or a value. Adding the getter to an existing
- // setter will store the value (kGetterProperty | kSetterProperty), which
- // is incompatible with adding any further properties.
+ // Used to validate property names in object literals and class literals
enum PropertyKind {
- kNone = 0,
- // Bit patterns representing different object literal property types.
- kGetterProperty = 1,
- kSetterProperty = 2,
- kValueProperty = 7,
- // Helper constants.
- kValueFlag = 4
+ kAccessorProperty,
+ kValueProperty,
+ kMethodProperty
};
- // Validation per ECMA 262 - 11.1.5 "Object Initializer".
- class ObjectLiteralChecker {
+ class ObjectLiteralCheckerBase {
public:
- ObjectLiteralChecker(ParserBase* parser, StrictMode strict_mode)
- : parser_(parser),
- finder_(scanner()->unicode_cache()),
- strict_mode_(strict_mode) {}
+ explicit ObjectLiteralCheckerBase(ParserBase* parser) : parser_(parser) {}
- void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
+ virtual void CheckProperty(Token::Value property, PropertyKind type,
+ bool is_static, bool is_generator, bool* ok) = 0;
- private:
+ virtual ~ObjectLiteralCheckerBase() {}
+
+ protected:
ParserBase* parser() const { return parser_; }
Scanner* scanner() const { return parser_->scanner(); }
- // Checks the type of conflict based on values coming from PropertyType.
- bool HasConflict(PropertyKind type1, PropertyKind type2) {
- return (type1 & type2) != 0;
- }
- bool IsDataDataConflict(PropertyKind type1, PropertyKind type2) {
- return ((type1 & type2) & kValueFlag) != 0;
- }
- bool IsDataAccessorConflict(PropertyKind type1, PropertyKind type2) {
- return ((type1 ^ type2) & kValueFlag) != 0;
+ private:
+ ParserBase* parser_;
+ };
+
+ // Validation per ES6 object literals.
+ class ObjectLiteralChecker : public ObjectLiteralCheckerBase {
+ public:
+ explicit ObjectLiteralChecker(ParserBase* parser)
+ : ObjectLiteralCheckerBase(parser), has_seen_proto_(false) {}
+
+ void CheckProperty(Token::Value property, PropertyKind type, bool is_static,
+ bool is_generator, bool* ok) OVERRIDE;
+
+ private:
+ bool IsProto() { return this->scanner()->LiteralMatches("__proto__", 9); }
+
+ bool has_seen_proto_;
+ };
+
+ // Validation per ES6 class literals.
+ class ClassLiteralChecker : public ObjectLiteralCheckerBase {
+ public:
+ explicit ClassLiteralChecker(ParserBase* parser)
+ : ObjectLiteralCheckerBase(parser), has_seen_constructor_(false) {}
+
+ void CheckProperty(Token::Value property, PropertyKind type, bool is_static,
+ bool is_generator, bool* ok) OVERRIDE;
+
+ private:
+ bool IsConstructor() {
+ return this->scanner()->LiteralMatches("constructor", 11);
}
- bool IsAccessorAccessorConflict(PropertyKind type1, PropertyKind type2) {
- return ((type1 | type2) & kValueFlag) == 0;
+ bool IsPrototype() {
+ return this->scanner()->LiteralMatches("prototype", 9);
}
- ParserBase* parser_;
- DuplicateFinder finder_;
- StrictMode strict_mode_;
+ bool has_seen_constructor_;
};
// If true, the next (and immediately following) function literal is
@@ -590,15 +665,18 @@ class ParserBase : public Traits {
// so never lazily compile it.
bool parenthesized_function_;
- typename Traits::Type::Scope* scope_; // Scope stack.
+ Scope* scope_; // Scope stack.
FunctionState* function_state_; // Function state stack.
v8::Extension* extension_;
FuncNameInferrer* fni_;
+ AstValueFactory* ast_value_factory_; // Not owned.
ParserRecorder* log_;
Mode mode_;
uintptr_t stack_limit_;
private:
+ Zone* zone_;
+
Scanner* scanner_;
bool stack_overflow_;
@@ -607,8 +685,9 @@ class ParserBase : public Traits {
bool allow_harmony_arrow_functions_;
bool allow_harmony_object_literals_;
bool allow_harmony_sloppy_;
-
- typename Traits::Type::Zone* zone_; // Only used by Parser.
+ bool allow_harmony_computed_property_names_;
+ bool allow_harmony_rest_params_;
+ bool allow_strong_mode_;
};
@@ -646,17 +725,13 @@ class PreParserIdentifier {
return PreParserIdentifier(kConstructorIdentifier);
}
bool IsEval() const { return type_ == kEvalIdentifier; }
- bool IsArguments(const AstValueFactory* = NULL) const {
- return type_ == kArgumentsIdentifier;
- }
+ bool IsArguments() const { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
bool IsLet() const { return type_ == kLetIdentifier; }
bool IsStatic() const { return type_ == kStaticIdentifier; }
bool IsYield() const { return type_ == kYieldIdentifier; }
bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
bool IsConstructor() const { return type_ == kConstructorIdentifier; }
- bool IsEvalOrArguments() const {
- return type_ == kEvalIdentifier || type_ == kArgumentsIdentifier;
- }
bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
bool IsFutureStrictReserved() const {
return type_ == kFutureStrictReservedIdentifier ||
@@ -695,7 +770,6 @@ class PreParserIdentifier {
Type type_;
friend class PreParserExpression;
- friend class PreParserScope;
};
@@ -729,8 +803,7 @@ class PreParserExpression {
}
static PreParserExpression StringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseStrictField::encode(false));
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression));
}
static PreParserExpression UseStrictStringLiteral() {
@@ -738,6 +811,11 @@ class PreParserExpression {
IsUseStrictField::encode(true));
}
+ static PreParserExpression UseStrongStringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
+ IsUseStrongField::encode(true));
+ }
+
static PreParserExpression This() {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kThisExpression));
@@ -789,6 +867,11 @@ class PreParserExpression {
IsUseStrictField::decode(code_);
}
+ bool IsUseStrongLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression &&
+ IsUseStrongField::decode(code_);
+ }
+
bool IsThis() const {
return TypeField::decode(code_) == kExpression &&
ExpressionTypeField::decode(code_) == kThisExpression;
@@ -897,6 +980,7 @@ class PreParserExpression {
typedef BitField<ExpressionType, ParenthesizationField::kNext, 3>
ExpressionTypeField;
typedef BitField<bool, ParenthesizationField::kNext, 1> IsUseStrictField;
+ typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
typedef BitField<bool, ParenthesizationField::kNext, 1>
IsValidArrowParamListField;
typedef BitField<PreParserIdentifier::Type, ParenthesizationField::kNext, 10>
@@ -938,6 +1022,9 @@ class PreParserStatement {
if (expression.IsUseStrictLiteral()) {
return PreParserStatement(kUseStrictExpressionStatement);
}
+ if (expression.IsUseStrongLiteral()) {
+ return PreParserStatement(kUseStrongExpressionStatement);
+ }
if (expression.IsStringLiteral()) {
return PreParserStatement(kStringLiteralExpressionStatement);
}
@@ -952,6 +1039,8 @@ class PreParserStatement {
return code_ == kUseStrictExpressionStatement;
}
+ bool IsUseStrongLiteral() { return code_ == kUseStrongExpressionStatement; }
+
bool IsFunctionDeclaration() {
return code_ == kFunctionDeclaration;
}
@@ -961,6 +1050,7 @@ class PreParserStatement {
kUnknownStatement,
kStringLiteralExpressionStatement,
kUseStrictExpressionStatement,
+ kUseStrongExpressionStatement,
kFunctionDeclaration
};
@@ -981,42 +1071,6 @@ class PreParserStatementList {
};
-class PreParserScope {
- public:
- explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type,
- void* = NULL)
- : scope_type_(scope_type) {
- strict_mode_ = outer_scope ? outer_scope->strict_mode() : SLOPPY;
- }
-
- ScopeType type() { return scope_type_; }
- StrictMode strict_mode() const { return strict_mode_; }
- void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
- void SetScopeName(PreParserIdentifier name) {}
-
- // When PreParser is in use, lazy compilation is already being done,
- // things cannot get lazier than that.
- bool AllowsLazyCompilation() const { return false; }
-
- void set_start_position(int position) {}
- void set_end_position(int position) {}
-
- bool IsDeclared(const PreParserIdentifier& identifier) const { return false; }
- void DeclareParameter(const PreParserIdentifier& identifier, VariableMode) {}
- void RecordArgumentsUsage() {}
- void RecordSuperPropertyUsage() {}
- void RecordSuperConstructorCallUsage() {}
- void RecordThisUsage() {}
-
- // Allow scope->Foo() to work.
- PreParserScope* operator->() { return this; }
-
- private:
- ScopeType scope_type_;
- StrictMode strict_mode_;
-};
-
-
class PreParserFactory {
public:
explicit PreParserFactory(void* unused_value_factory) {}
@@ -1039,14 +1093,17 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewObjectLiteralProperty(bool is_getter,
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
- int pos, bool is_static) {
+ ObjectLiteralProperty::Kind kind,
+ bool is_static,
+ bool is_computed_name) {
return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
- bool is_static) {
+ bool is_static,
+ bool is_computed_name) {
return PreParserExpression::Default();
}
PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
@@ -1122,9 +1179,8 @@ class PreParserFactory {
}
PreParserExpression NewFunctionLiteral(
PreParserIdentifier name, AstValueFactory* ast_value_factory,
- const PreParserScope& scope, PreParserStatementList body,
- int materialized_literal_count, int expected_property_count,
- int handler_count, int parameter_count,
+ Scope* scope, PreParserStatementList body, int materialized_literal_count,
+ int expected_property_count, int handler_count, int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
@@ -1152,18 +1208,10 @@ class PreParserTraits {
// it needs.
typedef PreParser* Parser;
- // Used by FunctionState and BlockState.
- typedef PreParserScope Scope;
- typedef PreParserScope ScopePtr;
- inline static Scope* ptr_to_scope(ScopePtr& scope) { return &scope; }
-
// PreParser doesn't need to store generator variables.
typedef void GeneratorVariable;
- // No interaction with Zones.
- typedef void Zone;
typedef int AstProperties;
- typedef Vector<PreParserIdentifier> ParameterIdentifierVector;
// Return types for traversing functions.
typedef PreParserIdentifier Identifier;
@@ -1184,6 +1232,14 @@ class PreParserTraits {
explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
// Helper functions for recursive descent.
+ static bool IsEval(PreParserIdentifier identifier) {
+ return identifier.IsEval();
+ }
+
+ static bool IsArguments(PreParserIdentifier identifier) {
+ return identifier.IsArguments();
+ }
+
static bool IsEvalOrArguments(PreParserIdentifier identifier) {
return identifier.IsEvalOrArguments();
}
@@ -1232,11 +1288,13 @@ class PreParserTraits {
// PreParser should not use FuncNameInferrer.
UNREACHABLE();
}
+
static void PushPropertyName(FuncNameInferrer* fni,
PreParserExpression expression) {
// PreParser should not use FuncNameInferrer.
UNREACHABLE();
}
+
static void InferFunctionName(FuncNameInferrer* fni,
PreParserExpression expression) {
// PreParser should not use FuncNameInferrer.
@@ -1244,15 +1302,14 @@ class PreParserTraits {
}
static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
- PreParserScope* scope, PreParserExpression property, bool* has_function) {
- }
+ Scope* scope, PreParserExpression property, bool* has_function) {}
static void CheckAssigningFunctionLiteralToProperty(
PreParserExpression left, PreParserExpression right) {}
// PreParser doesn't need to keep track of eval calls.
static void CheckPossibleEvalCall(PreParserExpression expression,
- PreParserScope* scope) {}
+ Scope* scope) {}
static PreParserExpression MarkExpressionAsAssigned(
PreParserExpression expression) {
@@ -1286,9 +1343,6 @@ class PreParserTraits {
const char* type, Handle<Object> arg, int pos) {
return PreParserExpression::Default();
}
- PreParserScope NewScope(PreParserScope* outer_scope, ScopeType scope_type) {
- return PreParserScope(outer_scope, scope_type);
- }
// Reporting errors.
void ReportMessageAt(Scanner::Location location,
@@ -1341,19 +1395,19 @@ class PreParserTraits {
return PreParserIdentifier::Default();
}
- static PreParserExpression ThisExpression(PreParserScope* scope,
- PreParserFactory* factory) {
+ static PreParserExpression ThisExpression(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
return PreParserExpression::This();
}
- static PreParserExpression SuperReference(PreParserScope* scope,
+ static PreParserExpression SuperReference(Scope* scope,
PreParserFactory* factory) {
return PreParserExpression::Super();
}
- static PreParserExpression DefaultConstructor(bool call_super,
- PreParserScope* scope, int pos,
- int end_pos) {
+ static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
+ int pos, int end_pos) {
return PreParserExpression::Default();
}
@@ -1364,7 +1418,7 @@ class PreParserTraits {
}
static PreParserExpression ExpressionFromIdentifier(
- PreParserIdentifier name, int pos, PreParserScope* scope,
+ PreParserIdentifier name, int pos, Scope* scope,
PreParserFactory* factory) {
return PreParserExpression::FromIdentifier(name);
}
@@ -1378,15 +1432,15 @@ class PreParserTraits {
return PreParserExpression::Default();
}
- static PreParserExpressionList NewExpressionList(int size, void* zone) {
+ static PreParserExpressionList NewExpressionList(int size, Zone* zone) {
return PreParserExpressionList();
}
- static PreParserStatementList NewStatementList(int size, void* zone) {
+ static PreParserStatementList NewStatementList(int size, Zone* zone) {
return PreParserStatementList();
}
- static PreParserExpressionList NewPropertyList(int size, void* zone) {
+ static PreParserExpressionList NewPropertyList(int size, Zone* zone) {
return PreParserExpressionList();
}
@@ -1397,13 +1451,13 @@ class PreParserTraits {
}
V8_INLINE PreParserStatementList
- ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
- Variable* fvar, Token::Value fvar_init_op,
- bool is_generator, bool* ok);
+ ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
+ Variable* fvar, Token::Value fvar_init_op,
+ FunctionKind kind, bool* ok);
// Utility functions
int DeclareArrowParametersFromExpression(PreParserExpression expression,
- PreParserScope* scope,
+ Scope* scope,
Scanner::Location* dupe_loc,
bool* ok) {
// TODO(aperez): Detect duplicated identifiers in paramlists.
@@ -1434,9 +1488,8 @@ class PreParserTraits {
static bool IsTaggedTemplate(const PreParserExpression tag) {
return !tag.IsNoTemplateTag();
}
- static AstValueFactory* ast_value_factory() { return NULL; }
- void CheckConflictingVarDeclarations(PreParserScope scope, bool* ok) {}
+ void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
// Temporary glue; these functions will move to ParserBase.
PreParserExpression ParseV8Intrinsic(bool* ok);
@@ -1479,25 +1532,27 @@ class PreParser : public ParserBase<PreParserTraits> {
kPreParseSuccess
};
- PreParser(Scanner* scanner, ParserRecorder* log, uintptr_t stack_limit)
- : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL,
- this) {}
+ PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
+ ParserRecorder* log, uintptr_t stack_limit)
+ : ParserBase<PreParserTraits>(zone, scanner, stack_limit, NULL,
+ ast_value_factory, log, this) {}
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
PreParseResult PreParseProgram(int* materialized_literals = 0) {
- PreParserScope scope(scope_, SCRIPT_SCOPE);
+ Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
PreParserFactory factory(NULL);
- FunctionState top_scope(&function_state_, &scope_, &scope, &factory);
+ FunctionState top_scope(&function_state_, &scope_, scope, kNormalFunction,
+ &factory);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
- ParseSourceElements(Token::EOS, &ok);
+ ParseStatementList(Token::EOS, &ok);
if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner()->current_token());
- } else if (scope_->strict_mode() == STRICT) {
+ } else if (is_strict(scope_->language_mode())) {
CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
&ok);
}
@@ -1515,9 +1570,8 @@ class PreParser : public ParserBase<PreParserTraits> {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
// the final '}'.
- PreParseResult PreParseLazyFunction(StrictMode strict_mode,
- bool is_generator,
- ParserRecorder* log);
+ PreParseResult PreParseLazyFunction(LanguageMode language_mode,
+ FunctionKind kind, ParserRecorder* log);
private:
friend class PreParserTraits;
@@ -1527,30 +1581,14 @@ class PreParser : public ParserBase<PreParserTraits> {
// are either being counted in the preparser data, or is important
// to throw the correct syntax error exceptions.
- enum VariableDeclarationContext {
- kSourceElement,
- kStatement,
- kForStatement
- };
-
- // If a list of variable declarations includes any initializers.
- enum VariableDeclarationProperties {
- kHasInitializers,
- kHasNoInitializers
- };
-
-
- enum SourceElements {
- kUnknownSourceElements
- };
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
- Statement ParseSourceElement(bool* ok);
- SourceElements ParseSourceElements(int end_token, bool* ok);
+ Statement ParseStatementListItem(bool* ok);
+ void ParseStatementList(int end_token, bool* ok);
Statement ParseStatement(bool* ok);
+ Statement ParseSubStatement(bool* ok);
Statement ParseFunctionDeclaration(bool* ok);
Statement ParseClassDeclaration(bool* ok);
Statement ParseBlock(bool* ok);
@@ -1581,9 +1619,9 @@ class PreParser : public ParserBase<PreParserTraits> {
int* materialized_literal_count,
int* expected_property_count, bool* ok);
V8_INLINE PreParserStatementList
- ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
- Variable* fvar, Token::Value fvar_init_op,
- bool is_generator, bool* ok);
+ ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
+ Variable* fvar, Token::Value fvar_init_op,
+ FunctionKind kind, bool* ok);
Expression ParseFunctionLiteral(
Identifier name, Scanner::Location function_name_location,
@@ -1596,8 +1634,6 @@ class PreParser : public ParserBase<PreParserTraits> {
Scanner::Location class_name_location,
bool name_is_strict_reserved, int pos,
bool* ok);
-
- bool CheckInOrOf(bool accept_OF);
};
@@ -1609,10 +1645,10 @@ void PreParserTraits::MaterializeTemplateCallsiteLiterals() {
PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos, Variable* fvar,
- Token::Value fvar_init_op, bool is_generator, bool* ok) {
+ Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
- ParseSourceElements(Token::RBRACE, ok);
+ ParseStatementList(Token::RBRACE, ok);
if (!*ok) return PreParserStatementList();
Expect(Token::RBRACE, ok);
@@ -1622,22 +1658,20 @@ PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos, Variable* fvar,
- Token::Value fvar_init_op, bool is_generator, bool* ok) {
+ Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
return pre_parser_->ParseEagerFunctionBody(function_name, pos, fvar,
- fvar_init_op, is_generator, ok);
+ fvar_init_op, kind, ok);
}
template <class Traits>
ParserBase<Traits>::FunctionState::FunctionState(
- FunctionState** function_state_stack,
- typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope,
- typename Traits::Type::Factory* factory)
+ FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
+ FunctionKind kind, typename Traits::Type::Factory* factory)
: next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
next_handler_index_(0),
expected_property_count_(0),
- is_generator_(false),
+ kind_(kind),
generator_object_variable_(NULL),
function_state_stack_(function_state_stack),
outer_function_state_(*function_state_stack),
@@ -1676,8 +1710,10 @@ void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
case Token::STATIC:
case Token::YIELD:
case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location, strict_mode() == SLOPPY
- ? "unexpected_token_identifier" : "unexpected_strict_reserved");
+ return ReportMessageAt(source_location,
+ is_strict(language_mode())
+ ? "unexpected_strict_reserved"
+ : "unexpected_token_identifier");
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
return Traits::ReportMessageAt(source_location,
@@ -1697,15 +1733,20 @@ typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
Token::Value next = Next();
if (next == Token::IDENTIFIER) {
IdentifierT name = this->GetSymbol(scanner());
- if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
- strict_mode() == STRICT && this->IsEvalOrArguments(name)) {
- ReportMessage("strict_eval_arguments");
- *ok = false;
+ if (allow_eval_or_arguments == kDontAllowEvalOrArguments) {
+ if (is_strict(language_mode()) && this->IsEvalOrArguments(name)) {
+ ReportMessage("strict_eval_arguments");
+ *ok = false;
+ }
+ } else {
+ if (is_strong(language_mode()) && this->IsArguments(name)) {
+ ReportMessage("strong_arguments");
+ *ok = false;
+ }
}
- if (name->IsArguments(this->ast_value_factory()))
- scope_->RecordArgumentsUsage();
+ if (this->IsArguments(name)) scope_->RecordArgumentsUsage();
return name;
- } else if (strict_mode() == SLOPPY &&
+ } else if (is_sloppy(language_mode()) &&
(next == Token::FUTURE_STRICT_RESERVED_WORD ||
next == Token::LET || next == Token::STATIC ||
(next == Token::YIELD && !is_generator()))) {
@@ -1736,8 +1777,7 @@ typename ParserBase<Traits>::IdentifierT ParserBase<
}
IdentifierT name = this->GetSymbol(scanner());
- if (name->IsArguments(this->ast_value_factory()))
- scope_->RecordArgumentsUsage();
+ if (this->IsArguments(name)) scope_->RecordArgumentsUsage();
return name;
}
@@ -1755,8 +1795,7 @@ ParserBase<Traits>::ParseIdentifierName(bool* ok) {
}
IdentifierT name = this->GetSymbol(scanner());
- if (name->IsArguments(this->ast_value_factory()))
- scope_->RecordArgumentsUsage();
+ if (this->IsArguments(name)) scope_->RecordArgumentsUsage();
return name;
}
@@ -1837,7 +1876,7 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
case Token::THIS: {
Consume(Token::THIS);
scope_->RecordThisUsage();
- result = this->ThisExpression(scope_, factory());
+ result = this->ThisExpression(scope_, factory(), pos);
break;
}
@@ -1902,7 +1941,7 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
case Token::CLASS: {
Consume(Token::CLASS);
- if (!allow_harmony_sloppy() && strict_mode() == SLOPPY) {
+ if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
ReportMessage("sloppy_lexical", NULL);
*ok = false;
break;
@@ -1997,34 +2036,69 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
template <class Traits>
-typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParsePropertyName(
- bool* is_get, bool* is_set, bool* is_static, bool* ok) {
- Token::Value next = peek();
- switch (next) {
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
+ IdentifierT* name, bool* is_get, bool* is_set, bool* is_static,
+ bool* is_computed_name, bool* ok) {
+ Token::Value token = peek();
+ int pos = peek_position();
+
+ // For non computed property names we normalize the name a bit:
+ //
+ // "12" -> 12
+ // 12.3 -> "12.3"
+ // 12.30 -> "12.3"
+ // identifier -> "identifier"
+ //
+ // This is important because we use the property name as a key in a hash
+ // table when we compute constant properties.
+ switch (token) {
case Token::STRING:
Consume(Token::STRING);
- return this->GetSymbol(scanner_);
+ *name = this->GetSymbol(scanner());
+ break;
+
case Token::NUMBER:
Consume(Token::NUMBER);
- return this->GetNumberAsSymbol(scanner_);
+ *name = this->GetNumberAsSymbol(scanner());
+ break;
+
+ case Token::LBRACK:
+ if (allow_harmony_computed_property_names_) {
+ *is_computed_name = true;
+ Consume(Token::LBRACK);
+ ExpressionT expression = ParseAssignmentExpression(true, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
+ return expression;
+ }
+
+ // Fall through.
case Token::STATIC:
*is_static = true;
- // Fall through.
+
+ // Fall through.
default:
- return ParseIdentifierNameOrGetOrSet(is_get, is_set, ok);
+ *name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
+ break;
}
- UNREACHABLE();
- return this->EmptyIdentifier();
+
+ uint32_t index;
+ return this->IsArrayIndex(*name, &index)
+ ? factory()->NewNumberLiteral(index, pos)
+ : factory()->NewStringLiteral(*name, pos);
}
template <class Traits>
-typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
- Traits>::ParsePropertyDefinition(ObjectLiteralChecker* checker,
- bool in_class, bool is_static,
- bool* has_seen_constructor, bool* ok) {
- DCHECK(!in_class || is_static || has_seen_constructor != NULL);
+typename ParserBase<Traits>::ObjectLiteralPropertyT
+ParserBase<Traits>::ParsePropertyDefinition(ObjectLiteralCheckerBase* checker,
+ bool in_class, bool has_extends,
+ bool is_static,
+ bool* is_computed_name,
+ bool* has_seen_constructor,
+ bool* ok) {
+ DCHECK(!in_class || is_static || has_seen_constructor != nullptr);
ExpressionT value = this->EmptyExpression();
+ IdentifierT name = this->EmptyIdentifier();
bool is_get = false;
bool is_set = false;
bool name_is_static = false;
@@ -2032,16 +2106,19 @@ typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
Token::Value name_token = peek();
int next_pos = peek_position();
- IdentifierT name =
- ParsePropertyName(&is_get, &is_set, &name_is_static,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ ExpressionT name_expression = ParsePropertyName(
+ &name, &is_get, &is_set, &name_is_static, is_computed_name,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- if (fni_ != NULL) this->PushLiteralName(fni_, name);
+ if (fni_ != nullptr && !*is_computed_name) {
+ this->PushLiteralName(fni_, name);
+ }
if (!in_class && !is_generator && peek() == Token::COLON) {
// PropertyDefinition : PropertyName ':' AssignmentExpression
- if (checker != NULL) {
- checker->CheckProperty(name_token, kValueProperty,
+ if (!*is_computed_name) {
+ checker->CheckProperty(name_token, kValueProperty, is_static,
+ is_generator,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
Consume(Token::COLON);
@@ -2051,36 +2128,19 @@ typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
} else if (is_generator ||
(allow_harmony_object_literals_ && peek() == Token::LPAREN)) {
// Concise Method
-
- if (is_static && this->IsPrototype(name)) {
- ReportMessageAt(scanner()->location(), "static_prototype");
- *ok = false;
- return this->EmptyObjectLiteralProperty();
+ if (!*is_computed_name) {
+ checker->CheckProperty(name_token, kMethodProperty, is_static,
+ is_generator,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
FunctionKind kind = is_generator ? FunctionKind::kConciseGeneratorMethod
: FunctionKind::kConciseMethod;
if (in_class && !is_static && this->IsConstructor(name)) {
- if (is_generator) {
- ReportMessageAt(scanner()->location(), "constructor_special_method");
- *ok = false;
- return this->EmptyObjectLiteralProperty();
- }
-
- if (*has_seen_constructor) {
- ReportMessageAt(scanner()->location(), "duplicate_constructor");
- *ok = false;
- return this->EmptyObjectLiteralProperty();
- }
-
*has_seen_constructor = true;
- kind = FunctionKind::kNormalFunction;
- }
-
- if (checker != NULL) {
- checker->CheckProperty(name_token, kValueProperty,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ kind = has_extends ? FunctionKind::kSubclassConstructor
+ : FunctionKind::kBaseConstructor;
}
value = this->ParseFunctionLiteral(
@@ -2090,47 +2150,59 @@ typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
FunctionLiteral::NORMAL_ARITY,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ return factory()->NewObjectLiteralProperty(name_expression, value,
+ ObjectLiteralProperty::COMPUTED,
+ is_static, *is_computed_name);
+
} else if (in_class && name_is_static && !is_static) {
// static MethodDefinition
- return ParsePropertyDefinition(checker, true, true, NULL, ok);
-
+ return ParsePropertyDefinition(checker, true, has_extends, true,
+ is_computed_name, nullptr, ok);
} else if (is_get || is_set) {
// Accessor
+ name = this->EmptyIdentifier();
bool dont_care = false;
name_token = peek();
- name = ParsePropertyName(&dont_care, &dont_care, &dont_care,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- // Validate the property.
- if (is_static && this->IsPrototype(name)) {
- ReportMessageAt(scanner()->location(), "static_prototype");
- *ok = false;
- return this->EmptyObjectLiteralProperty();
- } else if (in_class && !is_static && this->IsConstructor(name)) {
- ReportMessageAt(scanner()->location(), "constructor_special_method");
- *ok = false;
- return this->EmptyObjectLiteralProperty();
- }
- if (checker != NULL) {
- checker->CheckProperty(name_token,
- is_get ? kGetterProperty : kSetterProperty,
+ name_expression = ParsePropertyName(
+ &name, &dont_care, &dont_care, &dont_care, is_computed_name,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+ if (!*is_computed_name) {
+ checker->CheckProperty(name_token, kAccessorProperty, is_static,
+ is_generator,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
name, scanner()->location(),
false, // reserved words are allowed here
- FunctionKind::kNormalFunction, RelocInfo::kNoPosition,
+ FunctionKind::kAccessorFunction, RelocInfo::kNoPosition,
FunctionLiteral::ANONYMOUS_EXPRESSION,
is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- return factory()->NewObjectLiteralProperty(is_get, value, next_pos,
- is_static);
+
+ // Make sure the name expression is a string since we need a Name for
+ // Runtime_DefineAccessorPropertyUnchecked and since we can determine this
+ // statically we can skip the extra runtime check.
+ if (!*is_computed_name) {
+ name_expression =
+ factory()->NewStringLiteral(name, name_expression->position());
+ }
+
+ return factory()->NewObjectLiteralProperty(
+ name_expression, value,
+ is_get ? ObjectLiteralProperty::GETTER : ObjectLiteralProperty::SETTER,
+ is_static, *is_computed_name);
} else if (!in_class && allow_harmony_object_literals_ &&
- Token::IsIdentifier(name_token, strict_mode(),
+ Token::IsIdentifier(name_token, language_mode(),
this->is_generator())) {
+ DCHECK(!*is_computed_name);
+ DCHECK(!is_static);
value = this->ExpressionFromIdentifier(name, next_pos, scope_, factory());
+ return factory()->NewObjectLiteralProperty(
+ name_expression, value, ObjectLiteralProperty::COMPUTED, false, false);
} else {
Token::Value next = Next();
@@ -2139,12 +2211,8 @@ typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
return this->EmptyObjectLiteralProperty();
}
- uint32_t index;
- LiteralT key = this->IsArrayIndex(name, &index)
- ? factory()->NewNumberLiteral(index, next_pos)
- : factory()->NewStringLiteral(name, next_pos);
-
- return factory()->NewObjectLiteralProperty(key, value, is_static);
+ return factory()->NewObjectLiteralProperty(name_expression, value, is_static,
+ *is_computed_name);
}
@@ -2159,18 +2227,25 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
this->NewPropertyList(4, zone_);
int number_of_boilerplate_properties = 0;
bool has_function = false;
-
- ObjectLiteralChecker checker(this, strict_mode());
+ bool has_computed_names = false;
+ ObjectLiteralChecker checker(this);
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (fni_ != NULL) fni_->Enter();
+ if (fni_ != nullptr) fni_->Enter();
const bool in_class = false;
const bool is_static = false;
+ const bool has_extends = false;
+ bool is_computed_name = false;
ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
- &checker, in_class, is_static, NULL, CHECK_OK);
+ &checker, in_class, has_extends, is_static, &is_computed_name, NULL,
+ CHECK_OK);
+
+ if (is_computed_name) {
+ has_computed_names = true;
+ }
// Mark top-level object literals that contain function literals and
// pretenure the literal so it can be added as a constant function
@@ -2179,7 +2254,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
&has_function);
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (this->IsBoilerplateProperty(property)) {
+ if (!has_computed_names && this->IsBoilerplateProperty(property)) {
number_of_boilerplate_properties++;
}
properties->Add(property, zone());
@@ -2189,7 +2264,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
Expect(Token::COMMA, CHECK_OK);
}
- if (fni_ != NULL) {
+ if (fni_ != nullptr) {
fni_->Infer();
fni_->Leave();
}
@@ -2387,6 +2462,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
Token::Value op = Next();
+ Scanner::Location op_location = scanner()->location();
int pos = position();
ExpressionT y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
@@ -2406,6 +2482,11 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
+ if (cmp == Token::EQ && is_strong(language_mode())) {
+ ReportMessageAt(op_location, "strong_equal");
+ *ok = false;
+ return this->EmptyExpression();
+ }
x = factory()->NewCompareOperation(cmp, x, y, pos);
if (cmp != op) {
// The comparison was negated - add a NOT.
@@ -2443,12 +2524,17 @@ ParserBase<Traits>::ParseUnaryExpression(bool* ok) {
int pos = position();
ExpressionT expression = ParseUnaryExpression(CHECK_OK);
- // "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && strict_mode() == STRICT &&
- this->IsIdentifier(expression)) {
- ReportMessage("strict_delete");
- *ok = false;
- return this->EmptyExpression();
+ if (op == Token::DELETE && is_strict(language_mode())) {
+ if (is_strong(language_mode())) {
+ ReportMessage("strong_delete");
+ *ok = false;
+ return this->EmptyExpression();
+ } else if (this->IsIdentifier(expression)) {
+ // "delete identifier" is a syntax error in strict mode.
+ ReportMessage("strict_delete");
+ *ok = false;
+ return this->EmptyExpression();
+ }
}
// Allow Traits do rewrite the expression.
@@ -2609,8 +2695,9 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(bool* ok) {
Consume(Token::NEW);
int new_pos = position();
ExpressionT result = this->EmptyExpression();
- if (Check(Token::SUPER)) {
- result = this->SuperReference(scope_, factory());
+ if (peek() == Token::SUPER) {
+ const bool is_new = true;
+ result = ParseSuperExpression(is_new, CHECK_OK);
} else {
result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
}
@@ -2667,21 +2754,8 @@ ParserBase<Traits>::ParseMemberExpression(bool* ok) {
function_token_position, function_type, FunctionLiteral::NORMAL_ARITY,
CHECK_OK);
} else if (peek() == Token::SUPER) {
- int beg_pos = position();
- Consume(Token::SUPER);
- Token::Value next = peek();
- if (next == Token::PERIOD || next == Token::LBRACK) {
- scope_->RecordSuperPropertyUsage();
- result = this->SuperReference(scope_, factory());
- } else if (next == Token::LPAREN) {
- scope_->RecordSuperConstructorCallUsage();
- result = this->SuperReference(scope_, factory());
- } else {
- ReportMessageAt(Scanner::Location(beg_pos, position()),
- "unexpected_super");
- *ok = false;
- return this->EmptyExpression();
- }
+ const bool is_new = false;
+ result = ParseSuperExpression(is_new, CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
@@ -2693,6 +2767,37 @@ ParserBase<Traits>::ParseMemberExpression(bool* ok) {
template <class Traits>
typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseSuperExpression(bool is_new, bool* ok) {
+ Expect(Token::SUPER, CHECK_OK);
+
+ FunctionState* function_state = function_state_;
+ while (IsArrowFunction(function_state->kind())) {
+ function_state = function_state->outer();
+ }
+ // TODO(arv): Handle eval scopes similarly.
+
+ FunctionKind kind = function_state->kind();
+ if (IsConciseMethod(kind) || IsAccessorFunction(kind) ||
+ i::IsConstructor(kind)) {
+ if (peek() == Token::PERIOD || peek() == Token::LBRACK) {
+ scope_->RecordSuperPropertyUsage();
+ return this->SuperReference(scope_, factory());
+ }
+ // new super() is never allowed.
+ // super() is only allowed in derived constructor
+ if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
+ return this->SuperReference(scope_, factory());
+ }
+ }
+
+ ReportMessageAt(scanner()->location(), "unexpected_super");
+ *ok = false;
+ return this->EmptyExpression();
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseMemberExpressionContinuation(ExpressionT expression,
bool* ok) {
// Parses this part of MemberExpression:
@@ -2731,10 +2836,11 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(ExpressionT expression,
template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<
- Traits>::ParseArrowFunctionLiteral(int start_pos, ExpressionT params_ast,
- bool* ok) {
- typename Traits::Type::ScopePtr scope = this->NewScope(scope_, ARROW_SCOPE);
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseArrowFunctionLiteral(int start_pos,
+ ExpressionT params_ast,
+ bool* ok) {
+ Scope* scope = this->NewScope(scope_, ARROW_SCOPE);
typename Traits::Type::StatementList body;
int num_parameters = -1;
int materialized_literal_count = -1;
@@ -2742,11 +2848,11 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
int handler_count = 0;
{
- typename Traits::Type::Factory function_factory(this->ast_value_factory());
- FunctionState function_state(&function_state_, &scope_,
- Traits::Type::ptr_to_scope(scope),
- &function_factory);
+ typename Traits::Type::Factory function_factory(ast_value_factory());
+ FunctionState function_state(&function_state_, &scope_, scope,
+ kArrowFunction, &function_factory);
Scanner::Location dupe_error_loc = Scanner::Location::invalid();
+ // TODO(arv): Pass in eval_args_error_loc and reserved_loc here.
num_parameters = Traits::DeclareArrowParametersFromExpression(
params_ast, scope_, &dupe_error_loc, ok);
if (!*ok) {
@@ -2778,8 +2884,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
} else {
body = this->ParseEagerFunctionBody(
this->EmptyIdentifier(), RelocInfo::kNoPosition, NULL,
- Token::INIT_VAR, false, // Not a generator.
- CHECK_OK);
+ Token::INIT_VAR, kArrowFunction, CHECK_OK);
materialized_literal_count =
function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -2801,27 +2906,26 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
scope->set_end_position(scanner()->location().end_pos);
// Arrow function *parameter lists* are always checked as in strict mode.
- bool function_name_is_strict_reserved = false;
- Scanner::Location function_name_loc = Scanner::Location::invalid();
+ // TODO(arv): eval_args_error_loc and reserved_loc needs to be set by
+ // DeclareArrowParametersFromExpression.
Scanner::Location eval_args_error_loc = Scanner::Location::invalid();
Scanner::Location reserved_loc = Scanner::Location::invalid();
- this->CheckStrictFunctionNameAndParameters(
- this->EmptyIdentifier(), function_name_is_strict_reserved,
- function_name_loc, eval_args_error_loc, dupe_error_loc, reserved_loc,
- CHECK_OK);
+ const bool use_strict_params = true;
+ this->CheckFunctionParameterNames(language_mode(), use_strict_params,
+ eval_args_error_loc, dupe_error_loc, reserved_loc, CHECK_OK);
// Validate strict mode.
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
CheckStrictOctalLiteral(start_pos, scanner()->location().end_pos,
CHECK_OK);
}
- if (allow_harmony_scoping() && strict_mode() == STRICT)
+ if (allow_harmony_scoping() && is_strict(language_mode()))
this->CheckConflictingVarDeclarations(scope, CHECK_OK);
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
- this->EmptyIdentifierString(), this->ast_value_factory(), scope, body,
+ this->EmptyIdentifierString(), ast_value_factory(), scope, body,
materialized_literal_count, expected_property_count, handler_count,
num_parameters, FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
@@ -2874,6 +2978,7 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start, bool* ok) {
// case, representing a TemplateMiddle).
do {
+ CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
next = peek();
if (next == Token::EOS) {
ReportMessageAt(Scanner::Location(start, peek_position()),
@@ -2903,10 +3008,10 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start, bool* ok) {
// TEMPLATE_SPAN or TEMPLATE_TAIL.
next = scanner()->ScanTemplateContinuation();
Next();
+ pos = position();
if (next == Token::EOS) {
- ReportMessageAt(Scanner::Location(start, position()),
- "unterminated_template");
+ ReportMessageAt(Scanner::Location(start, pos), "unterminated_template");
*ok = false;
return Traits::EmptyExpression();
} else if (next == Token::ILLEGAL) {
@@ -2932,7 +3037,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
Traits>::CheckAndRewriteReferenceExpression(ExpressionT expression,
Scanner::Location location,
const char* message, bool* ok) {
- if (strict_mode() == STRICT && this->IsIdentifier(expression) &&
+ if (is_strict(language_mode()) && this->IsIdentifier(expression) &&
this->IsEvalOrArguments(this->AsIdentifier(expression))) {
this->ReportMessageAt(location, "strict_eval_arguments", false);
*ok = false;
@@ -2959,28 +3064,52 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
template <typename Traits>
void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
- Token::Value property, PropertyKind type, bool* ok) {
- int old;
- if (property == Token::NUMBER) {
- old = scanner()->FindNumber(&finder_, type);
- } else {
- old = scanner()->FindSymbol(&finder_, type);
- }
- PropertyKind old_type = static_cast<PropertyKind>(old);
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (strict_mode_ == SLOPPY) return;
- parser()->ReportMessage("strict_duplicate_property");
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- parser()->ReportMessage("accessor_data_property");
- } else {
- DCHECK(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- parser()->ReportMessage("accessor_get_set");
+ Token::Value property, PropertyKind type, bool is_static, bool is_generator,
+ bool* ok) {
+ DCHECK(!is_static);
+ DCHECK(!is_generator || type == kMethodProperty);
+
+ if (property == Token::NUMBER) return;
+
+ if (type == kValueProperty && IsProto()) {
+ if (has_seen_proto_) {
+ this->parser()->ReportMessage("duplicate_proto");
+ *ok = false;
+ return;
}
- *ok = false;
+ has_seen_proto_ = true;
+ return;
+ }
+}
+
+
+template <typename Traits>
+void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
+ Token::Value property, PropertyKind type, bool is_static, bool is_generator,
+ bool* ok) {
+ DCHECK(type == kMethodProperty || type == kAccessorProperty);
+
+ if (property == Token::NUMBER) return;
+
+ if (is_static) {
+ if (IsPrototype()) {
+ this->parser()->ReportMessage("static_prototype");
+ *ok = false;
+ return;
+ }
+ } else if (IsConstructor()) {
+ if (is_generator || type == kAccessorProperty) {
+ this->parser()->ReportMessage("constructor_special_method");
+ *ok = false;
+ return;
+ }
+ if (has_seen_constructor_) {
+ this->parser()->ReportMessage("duplicate_constructor");
+ *ok = false;
+ return;
+ }
+ has_seen_constructor_ = true;
+ return;
}
}
} } // v8::internal
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 1ff2edd285..da43d0eb0f 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -14,13 +14,428 @@
namespace v8 {
namespace internal {
+CallPrinter::CallPrinter(Isolate* isolate, Zone* zone) {
+ output_ = NULL;
+ size_ = 0;
+ pos_ = 0;
+ position_ = 0;
+ found_ = false;
+ done_ = false;
+ InitializeAstVisitor(isolate, zone);
+}
+
+
+CallPrinter::~CallPrinter() { DeleteArray(output_); }
+
+
+const char* CallPrinter::Print(FunctionLiteral* program, int position) {
+ Init();
+ position_ = position;
+ Find(program);
+ return output_;
+}
+
+
+void CallPrinter::Find(AstNode* node, bool print) {
+ if (done_) return;
+ if (found_) {
+ if (print) {
+ int start = pos_;
+ Visit(node);
+ if (start != pos_) return;
+ }
+ Print("(intermediate value)");
+ } else {
+ Visit(node);
+ }
+}
+
+
+void CallPrinter::Init() {
+ if (size_ == 0) {
+ DCHECK(output_ == NULL);
+ const int initial_size = 256;
+ output_ = NewArray<char>(initial_size);
+ size_ = initial_size;
+ }
+ output_[0] = '\0';
+ pos_ = 0;
+}
+
+
+void CallPrinter::Print(const char* format, ...) {
+ if (!found_ || done_) return;
+ for (;;) {
+ va_list arguments;
+ va_start(arguments, format);
+ int n = VSNPrintF(Vector<char>(output_, size_) + pos_, format, arguments);
+ va_end(arguments);
+
+ if (n >= 0) {
+ // there was enough space - we are done
+ pos_ += n;
+ return;
+ } else {
+ // there was not enough space - allocate more and try again
+ const int slack = 32;
+ int new_size = size_ + (size_ >> 1) + slack;
+ char* new_output = NewArray<char>(new_size);
+ MemCopy(new_output, output_, pos_);
+ DeleteArray(output_);
+ output_ = new_output;
+ size_ = new_size;
+ }
+ }
+}
+
+
+void CallPrinter::VisitBlock(Block* node) {
+ FindStatements(node->statements());
+}
+
+
+void CallPrinter::VisitVariableDeclaration(VariableDeclaration* node) {}
+
+
+void CallPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {}
+
+
+void CallPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
+ Find(node->module());
+}
+
+
+void CallPrinter::VisitImportDeclaration(ImportDeclaration* node) {
+ Find(node->module());
+}
+
+
+void CallPrinter::VisitExportDeclaration(ExportDeclaration* node) {}
+
+
+void CallPrinter::VisitModuleLiteral(ModuleLiteral* node) {
+ VisitBlock(node->body());
+}
+
+
+void CallPrinter::VisitModulePath(ModulePath* node) { Find(node->module()); }
+
+
+void CallPrinter::VisitModuleUrl(ModuleUrl* node) {}
+
+
+void CallPrinter::VisitModuleStatement(ModuleStatement* node) {
+ Find(node->body());
+}
+
+
+void CallPrinter::VisitExpressionStatement(ExpressionStatement* node) {
+ Find(node->expression());
+}
+
+
+void CallPrinter::VisitEmptyStatement(EmptyStatement* node) {}
+
+
+void CallPrinter::VisitIfStatement(IfStatement* node) {
+ Find(node->condition());
+ Find(node->then_statement());
+ if (node->HasElseStatement()) {
+ Find(node->else_statement());
+ }
+}
+
+
+void CallPrinter::VisitContinueStatement(ContinueStatement* node) {}
+
+
+void CallPrinter::VisitBreakStatement(BreakStatement* node) {}
+
+
+void CallPrinter::VisitReturnStatement(ReturnStatement* node) {
+ Find(node->expression());
+}
+
+
+void CallPrinter::VisitWithStatement(WithStatement* node) {
+ Find(node->expression());
+ Find(node->statement());
+}
+
+
+void CallPrinter::VisitSwitchStatement(SwitchStatement* node) {
+ Find(node->tag());
+ ZoneList<CaseClause*>* cases = node->cases();
+ for (int i = 0; i < cases->length(); i++) Find(cases->at(i));
+}
+
+
+void CallPrinter::VisitCaseClause(CaseClause* clause) {
+ if (!clause->is_default()) {
+ Find(clause->label());
+ }
+ FindStatements(clause->statements());
+}
+
+
+void CallPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
+ Find(node->body());
+ Find(node->cond());
+}
+
+
+void CallPrinter::VisitWhileStatement(WhileStatement* node) {
+ Find(node->cond());
+ Find(node->body());
+}
+
+
+void CallPrinter::VisitForStatement(ForStatement* node) {
+ if (node->init() != NULL) {
+ Find(node->init());
+ }
+ if (node->cond() != NULL) Find(node->cond());
+ if (node->next() != NULL) Find(node->next());
+ Find(node->body());
+}
+
+
+void CallPrinter::VisitForInStatement(ForInStatement* node) {
+ Find(node->each());
+ Find(node->enumerable());
+ Find(node->body());
+}
+
+
+void CallPrinter::VisitForOfStatement(ForOfStatement* node) {
+ Find(node->each());
+ Find(node->iterable());
+ Find(node->body());
+}
+
+
+void CallPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
+ Find(node->try_block());
+ Find(node->catch_block());
+}
+
+
+void CallPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ Find(node->try_block());
+ Find(node->finally_block());
+}
+
+
+void CallPrinter::VisitDebuggerStatement(DebuggerStatement* node) {}
+
+
+void CallPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
+ FindStatements(node->body());
+}
+
+
+void CallPrinter::VisitClassLiteral(ClassLiteral* node) {
+ if (node->extends()) Find(node->extends());
+ for (int i = 0; i < node->properties()->length(); i++) {
+ Find(node->properties()->at(i)->value());
+ }
+}
+
+
+void CallPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {}
+
+
+void CallPrinter::VisitConditional(Conditional* node) {
+ Find(node->condition());
+ Find(node->then_expression());
+ Find(node->else_expression());
+}
+
+
+void CallPrinter::VisitLiteral(Literal* node) {
+ PrintLiteral(node->value(), true);
+}
+
+
+void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
+ Print("/");
+ PrintLiteral(node->pattern(), false);
+ Print("/");
+ PrintLiteral(node->flags(), false);
+}
+
+
+void CallPrinter::VisitObjectLiteral(ObjectLiteral* node) {
+ for (int i = 0; i < node->properties()->length(); i++) {
+ Find(node->properties()->at(i)->value());
+ }
+}
+
+
+void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
+ Print("[");
+ for (int i = 0; i < node->values()->length(); i++) {
+ if (i != 0) Print(",");
+ Find(node->values()->at(i), true);
+ }
+ Print("]");
+}
+
+
+void CallPrinter::VisitVariableProxy(VariableProxy* node) {
+ PrintLiteral(node->name(), false);
+}
+
+
+void CallPrinter::VisitAssignment(Assignment* node) {
+ Find(node->target());
+ Find(node->value());
+}
+
+
+void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }
+
+
+void CallPrinter::VisitThrow(Throw* node) { Find(node->exception()); }
+
+
+void CallPrinter::VisitProperty(Property* node) {
+ Expression* key = node->key();
+ Literal* literal = key->AsLiteral();
+ if (literal != NULL && literal->value()->IsInternalizedString()) {
+ Find(node->obj(), true);
+ Print(".");
+ PrintLiteral(literal->value(), false);
+ } else {
+ Find(node->obj(), true);
+ Print("[");
+ Find(key, true);
+ Print("]");
+ }
+}
+
+
+void CallPrinter::VisitCall(Call* node) {
+ bool was_found = !found_ && node->position() == position_;
+ if (was_found) found_ = true;
+ Find(node->expression(), true);
+ if (!was_found) Print("(...)");
+ FindArguments(node->arguments());
+ if (was_found) done_ = true;
+}
+
+
+void CallPrinter::VisitCallNew(CallNew* node) {
+ bool was_found = !found_ && node->expression()->position() == position_;
+ if (was_found) found_ = true;
+ Find(node->expression(), was_found);
+ FindArguments(node->arguments());
+ if (was_found) done_ = true;
+}
+
+
+void CallPrinter::VisitCallRuntime(CallRuntime* node) {
+ FindArguments(node->arguments());
+}
+
+
+void CallPrinter::VisitUnaryOperation(UnaryOperation* node) {
+ Token::Value op = node->op();
+ bool needsSpace =
+ op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID;
+ Print("(%s%s", Token::String(op), needsSpace ? " " : "");
+ Find(node->expression(), true);
+ Print(")");
+}
+
+
+void CallPrinter::VisitCountOperation(CountOperation* node) {
+ Print("(");
+ if (node->is_prefix()) Print("%s", Token::String(node->op()));
+ Find(node->expression(), true);
+ if (node->is_postfix()) Print("%s", Token::String(node->op()));
+ Print(")");
+}
+
+
+void CallPrinter::VisitBinaryOperation(BinaryOperation* node) {
+ Print("(");
+ Find(node->left(), true);
+ Print(" %s ", Token::String(node->op()));
+ Find(node->right(), true);
+ Print(")");
+}
+
+
+void CallPrinter::VisitCompareOperation(CompareOperation* node) {
+ Print("(");
+ Find(node->left(), true);
+ Print(" %s ", Token::String(node->op()));
+ Find(node->right(), true);
+ Print(")");
+}
+
+
+void CallPrinter::VisitThisFunction(ThisFunction* node) {}
+
+
+void CallPrinter::VisitSuperReference(SuperReference* node) {}
+
+
+void CallPrinter::FindStatements(ZoneList<Statement*>* statements) {
+ if (statements == NULL) return;
+ for (int i = 0; i < statements->length(); i++) {
+ Find(statements->at(i));
+ }
+}
+
+
+void CallPrinter::FindArguments(ZoneList<Expression*>* arguments) {
+ if (found_) return;
+ for (int i = 0; i < arguments->length(); i++) {
+ Find(arguments->at(i));
+ }
+}
+
+
+void CallPrinter::PrintLiteral(Handle<Object> value, bool quote) {
+ Object* object = *value;
+ if (object->IsString()) {
+ String* string = String::cast(object);
+ if (quote) Print("\"");
+ for (int i = 0; i < string->length(); i++) {
+ Print("%c", string->Get(i));
+ }
+ if (quote) Print("\"");
+ } else if (object->IsNull()) {
+ Print("null");
+ } else if (object->IsTrue()) {
+ Print("true");
+ } else if (object->IsFalse()) {
+ Print("false");
+ } else if (object->IsUndefined()) {
+ Print("undefined");
+ } else if (object->IsNumber()) {
+ Print("%g", object->Number());
+ }
+}
+
+
+void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
+ PrintLiteral(value->string(), quote);
+}
+
+
+//-----------------------------------------------------------------------------
+
+
#ifdef DEBUG
-PrettyPrinter::PrettyPrinter(Zone* zone) {
+PrettyPrinter::PrettyPrinter(Isolate* isolate, Zone* zone) {
output_ = NULL;
size_ = 0;
pos_ = 0;
- InitializeAstVisitor(zone);
+ InitializeAstVisitor(isolate, zone);
}
@@ -83,11 +498,6 @@ void PrettyPrinter::VisitModuleLiteral(ModuleLiteral* node) {
}
-void PrettyPrinter::VisitModuleVariable(ModuleVariable* node) {
- Visit(node->proxy());
-}
-
-
void PrettyPrinter::VisitModulePath(ModulePath* node) {
Visit(node->module());
Print(".");
@@ -103,8 +513,6 @@ void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) {
void PrettyPrinter::VisitModuleStatement(ModuleStatement* node) {
Print("module ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" ");
Visit(node->body());
}
@@ -497,8 +905,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(Zone* zone, AstNode* node) {
- PrettyPrinter printer(zone);
+void PrettyPrinter::PrintOut(Isolate* isolate, Zone* zone, AstNode* node) {
+ PrettyPrinter printer(isolate, zone);
PrintF("%s", printer.Print(node));
}
@@ -666,8 +1074,8 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter(Zone* zone) : PrettyPrinter(zone), indent_(0) {
-}
+AstPrinter::AstPrinter(Isolate* isolate, Zone* zone)
+ : PrettyPrinter(isolate, zone), indent_(0) {}
AstPrinter::~AstPrinter() {
@@ -821,12 +1229,6 @@ void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) {
}
-void AstPrinter::VisitModuleVariable(ModuleVariable* node) {
- IndentedScope indent(this, "MODULE VARIABLE");
- Visit(node->proxy());
-}
-
-
void AstPrinter::VisitModulePath(ModulePath* node) {
IndentedScope indent(this, "MODULE PATH");
PrintIndentedVisit("MODULE PATH PARENT", node->module());
@@ -841,7 +1243,6 @@ void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
void AstPrinter::VisitModuleStatement(ModuleStatement* node) {
IndentedScope indent(this, "MODULE STATEMENT");
- PrintLiteralIndented("NAME", node->proxy()->name(), true);
PrintStatements(node->body()->statements());
}
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h
index bf015204c9..d1943bcc79 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/prettyprinter.h
@@ -11,11 +11,48 @@
namespace v8 {
namespace internal {
+class CallPrinter : public AstVisitor {
+ public:
+ CallPrinter(Isolate* isolate, Zone* zone);
+ virtual ~CallPrinter();
+
+ // The following routine prints the node with position |position| into a
+ // string. The result string is alive as long as the CallPrinter is alive.
+ const char* Print(FunctionLiteral* program, int position);
+
+ void Print(const char* format, ...);
+
+ void Find(AstNode* node, bool print = false);
+
+// Individual nodes
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+ void Init();
+ char* output_; // output string buffer
+ int size_; // output_ size
+ int pos_; // current printing position
+ int position_; // position of ast node to print
+ bool found_;
+ bool done_;
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ protected:
+ void PrintLiteral(Handle<Object> value, bool quote);
+ void PrintLiteral(const AstRawString* value, bool quote);
+ void FindStatements(ZoneList<Statement*>* statements);
+ void FindArguments(ZoneList<Expression*>* arguments);
+};
+
+
#ifdef DEBUG
class PrettyPrinter: public AstVisitor {
public:
- explicit PrettyPrinter(Zone* zone);
+ PrettyPrinter(Isolate* isolate, Zone* zone);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -27,7 +64,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(Zone* zone, AstNode* node);
+ static void PrintOut(Isolate* isolate, Zone* zone, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
@@ -61,7 +98,7 @@ class PrettyPrinter: public AstVisitor {
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- explicit AstPrinter(Zone* zone);
+ AstPrinter(Isolate* isolate, Zone* zone);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index c53d749918..d9f55dd44b 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -25,6 +25,8 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
script_id_(v8::UnboundScript::kNoScriptId),
no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason),
+ deopt_reason_(kNoDeoptReason),
+ deopt_location_(0),
line_info_(line_info),
instruction_start_(instruction_start) {}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 7ad491021b..292e3325d1 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -8,11 +8,11 @@
#include "src/compiler.h"
#include "src/debug.h"
+#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/sampler.h"
#include "src/scopeinfo.h"
#include "src/unicode.h"
-#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -160,6 +160,7 @@ int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
+const char* const CodeEntry::kNoDeoptReason = "";
CodeEntry::~CodeEntry() {
@@ -213,6 +214,12 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
}
+void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
+ deopt_infos_.Add(DeoptInfo(entry->deopt_reason(), entry->deopt_location()));
+ entry->clear_deopt_info();
+}
+
+
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), false);
@@ -224,13 +231,14 @@ ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), true);
- if (map_entry->value == NULL) {
+ ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
+ if (node == NULL) {
// New node added.
- ProfileNode* new_node = new ProfileNode(tree_, entry);
- map_entry->value = new_node;
- children_list_.Add(new_node);
+ node = new ProfileNode(tree_, entry);
+ map_entry->value = node;
+ children_list_.Add(node);
}
- return reinterpret_cast<ProfileNode*>(map_entry->value);
+ return node;
}
@@ -269,12 +277,22 @@ bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
void ProfileNode::Print(int indent) {
- base::OS::Print("%5u %*s %s%s %d #%d %s", self_ticks_, indent, "",
+ base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
entry_->name_prefix(), entry_->name(), entry_->script_id(),
- id(), entry_->bailout_reason());
+ id());
if (entry_->resource_name()[0] != '\0')
base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
base::OS::Print("\n");
+ for (auto info : deopt_infos_) {
+ base::OS::Print("%*s deopted at %d with reason '%s'\n", indent + 10, "",
+ info.deopt_location, info.deopt_reason);
+ }
+ const char* bailout_reason = entry_->bailout_reason();
+ if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
+ bailout_reason != CodeEntry::kEmptyBailoutReason) {
+ base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
+ bailout_reason);
+ }
for (HashMap::Entry* p = children_.Start();
p != NULL;
p = children_.Next(p)) {
@@ -311,35 +329,23 @@ ProfileTree::~ProfileTree() {
ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
int src_line) {
ProfileNode* node = root_;
+ CodeEntry* last_entry = NULL;
for (CodeEntry** entry = path.start() + path.length() - 1;
entry != path.start() - 1;
--entry) {
if (*entry != NULL) {
node = node->FindOrAddChild(*entry);
+ last_entry = *entry;
}
}
- node->IncrementSelfTicks();
- if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
- node->IncrementLineTicks(src_line);
- }
- return node;
-}
-
-
-void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path,
- int src_line) {
- ProfileNode* node = root_;
- for (CodeEntry** entry = path.start();
- entry != path.start() + path.length();
- ++entry) {
- if (*entry != NULL) {
- node = node->FindOrAddChild(*entry);
- }
+ if (last_entry && last_entry->has_deopt_info()) {
+ node->CollectDeoptInfo(last_entry);
}
node->IncrementSelfTicks();
if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
node->IncrementLineTicks(src_line);
}
+ return node;
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 2127a1e1ca..f7176a053a 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -92,6 +92,19 @@ class CodeEntry {
}
const char* bailout_reason() const { return bailout_reason_; }
+ void set_deopt_info(const char* deopt_reason, int location) {
+ DCHECK(!deopt_location_);
+ deopt_reason_ = deopt_reason;
+ deopt_location_ = location;
+ }
+ const char* deopt_reason() const { return deopt_reason_; }
+ int deopt_location() const { return deopt_location_; }
+ bool has_deopt_info() const { return deopt_location_; }
+ void clear_deopt_info() {
+ deopt_reason_ = kNoDeoptReason;
+ deopt_location_ = 0;
+ }
+
static inline bool is_js_function_tag(Logger::LogEventsAndTags tag);
List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
@@ -114,6 +127,7 @@ class CodeEntry {
static const char* const kEmptyNamePrefix;
static const char* const kEmptyResourceName;
static const char* const kEmptyBailoutReason;
+ static const char* const kNoDeoptReason;
private:
class TagField : public BitField<Logger::LogEventsAndTags, 0, 8> {};
@@ -130,6 +144,8 @@ class CodeEntry {
int script_id_;
List<OffsetRange>* no_frame_ranges_;
const char* bailout_reason_;
+ const char* deopt_reason_;
+ int deopt_location_;
JITLineInfoTable* line_info_;
Address instruction_start_;
@@ -140,6 +156,17 @@ class CodeEntry {
class ProfileTree;
class ProfileNode {
+ private:
+ struct DeoptInfo {
+ DeoptInfo(const char* deopt_reason, int deopt_location)
+ : deopt_reason(deopt_reason), deopt_location(deopt_location) {}
+ DeoptInfo(const DeoptInfo& info)
+ : deopt_reason(info.deopt_reason),
+ deopt_location(info.deopt_location) {}
+ const char* deopt_reason;
+ int deopt_location;
+ };
+
public:
inline ProfileNode(ProfileTree* tree, CodeEntry* entry);
@@ -156,6 +183,8 @@ class ProfileNode {
unsigned int GetHitLineCount() const { return line_ticks_.occupancy(); }
bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
unsigned int length) const;
+ void CollectDeoptInfo(CodeEntry* entry);
+ const List<DeoptInfo>& deopt_infos() const { return deopt_infos_; }
void Print(int indent);
@@ -180,6 +209,7 @@ class ProfileNode {
unsigned id_;
HashMap line_ticks_;
+ List<DeoptInfo> deopt_infos_;
DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
@@ -192,8 +222,6 @@ class ProfileTree {
ProfileNode* AddPathFromEnd(
const Vector<CodeEntry*>& path,
int src_line = v8::CpuProfileNode::kNoLineNumberInfo);
- void AddPathFromStart(const Vector<CodeEntry*>& path,
- int src_line = v8::CpuProfileNode::kNoLineNumberInfo);
ProfileNode* root() const { return root_; }
unsigned next_node_id() { return next_node_id_++; }
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 6140e0d4bb..135a079d26 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -43,22 +43,22 @@ class TypeInfo;
// Type of properties.
// Order of kinds is significant.
// Must fit in the BitField PropertyDetails::KindField.
-enum PropertyKind { DATA = 0, ACCESSOR = 1 };
+enum PropertyKind { kData = 0, kAccessor = 1 };
// Order of modes is significant.
// Must fit in the BitField PropertyDetails::StoreModeField.
-enum PropertyLocation { IN_OBJECT = 0, IN_DESCRIPTOR = 1 };
+enum PropertyLocation { kField = 0, kDescriptor = 1 };
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
- FIELD = (IN_OBJECT << 1) | DATA,
- CONSTANT = (IN_DESCRIPTOR << 1) | DATA,
- ACCESSOR_FIELD = (IN_OBJECT << 1) | ACCESSOR,
- CALLBACKS = (IN_DESCRIPTOR << 1) | ACCESSOR
+ DATA = (kField << 1) | kData,
+ DATA_CONSTANT = (kDescriptor << 1) | kData,
+ ACCESSOR = (kField << 1) | kAccessor,
+ ACCESSOR_CONSTANT = (kDescriptor << 1) | kAccessor
};
@@ -212,6 +212,15 @@ class PropertyDetails BASE_EMBEDDED {
| FieldIndexField::encode(field_index);
}
+ PropertyDetails(PropertyAttributes attributes, PropertyKind kind,
+ PropertyLocation location, Representation representation,
+ int field_index = 0) {
+ value_ = KindField::encode(kind) | LocationField::encode(location) |
+ AttributesField::encode(attributes) |
+ RepresentationField::encode(EncodeRepresentation(representation)) |
+ FieldIndexField::encode(field_index);
+ }
+
int pointer() const { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index d00998c256..3b4ddafd82 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -10,27 +10,6 @@
namespace v8 {
namespace internal {
-void LookupResult::Iterate(ObjectVisitor* visitor) {
- LookupResult* current = this; // Could be NULL.
- while (current != NULL) {
- visitor->VisitPointer(bit_cast<Object**>(&current->holder_));
- visitor->VisitPointer(bit_cast<Object**>(&current->transition_));
- current = current->next_;
- }
-}
-
-
-std::ostream& operator<<(std::ostream& os, const LookupResult& r) {
- if (!r.IsFound()) return os << "Not Found\n";
-
- os << "LookupResult:\n";
- if (r.IsTransition()) {
- os << " -transition target:\n" << Brief(r.GetTransitionTarget()) << "\n";
- }
- return os;
-}
-
-
std::ostream& operator<<(std::ostream& os,
const PropertyAttributes& attributes) {
os << "[";
@@ -51,10 +30,10 @@ struct FastPropertyDetails {
// Outputs PropertyDetails as a dictionary details.
std::ostream& operator<<(std::ostream& os, const PropertyDetails& details) {
os << "(";
- if (details.location() == IN_DESCRIPTOR) {
+ if (details.location() == kDescriptor) {
os << "immutable ";
}
- os << (details.kind() == DATA ? "data" : "accessor");
+ os << (details.kind() == kData ? "data" : "accessor");
return os << ", dictionary_index: " << details.dictionary_index()
<< ", attrs: " << details.attributes() << ")";
}
@@ -65,13 +44,13 @@ std::ostream& operator<<(std::ostream& os,
const FastPropertyDetails& details_fast) {
const PropertyDetails& details = details_fast.details;
os << "(";
- if (details.location() == IN_DESCRIPTOR) {
+ if (details.location() == kDescriptor) {
os << "immutable ";
}
- os << (details.kind() == DATA ? "data" : "accessor");
- if (details.location() == IN_OBJECT) {
- os << ": " << details.representation().Mnemonic()
- << ", field_index: " << details.field_index();
+ os << (details.kind() == kData ? "data" : "accessor");
+ os << ": " << details.representation().Mnemonic();
+ if (details.location() == kField) {
+ os << ", field_index: " << details.field_index();
}
return os << ", p: " << details.pointer()
<< ", attrs: " << details.attributes() << ")";
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index a9d8b09354..5f8e6da407 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -73,180 +73,37 @@ class Descriptor BASE_EMBEDDED {
std::ostream& operator<<(std::ostream& os, const Descriptor& d);
-class FieldDescriptor FINAL : public Descriptor {
+class DataDescriptor FINAL : public Descriptor {
public:
- FieldDescriptor(Handle<Name> key,
- int field_index,
- PropertyAttributes attributes,
- Representation representation)
- : Descriptor(key, HeapType::Any(key->GetIsolate()), attributes,
- FIELD, representation, field_index) {}
- FieldDescriptor(Handle<Name> key,
- int field_index,
- Handle<HeapType> field_type,
- PropertyAttributes attributes,
- Representation representation)
- : Descriptor(key, field_type, attributes, FIELD,
- representation, field_index) { }
+ DataDescriptor(Handle<Name> key, int field_index,
+ PropertyAttributes attributes, Representation representation)
+ : Descriptor(key, HeapType::Any(key->GetIsolate()), attributes, DATA,
+ representation, field_index) {}
+ DataDescriptor(Handle<Name> key, int field_index, Handle<HeapType> field_type,
+ PropertyAttributes attributes, Representation representation)
+ : Descriptor(key, field_type, attributes, DATA, representation,
+ field_index) {}
};
-class ConstantDescriptor FINAL : public Descriptor {
+class DataConstantDescriptor FINAL : public Descriptor {
public:
- ConstantDescriptor(Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes)
- : Descriptor(key, value, attributes, CONSTANT,
+ DataConstantDescriptor(Handle<Name> key, Handle<Object> value,
+ PropertyAttributes attributes)
+ : Descriptor(key, value, attributes, DATA_CONSTANT,
value->OptimalRepresentation()) {}
};
-class CallbacksDescriptor FINAL : public Descriptor {
+class AccessorConstantDescriptor FINAL : public Descriptor {
public:
- CallbacksDescriptor(Handle<Name> key,
- Handle<Object> foreign,
- PropertyAttributes attributes)
- : Descriptor(key, foreign, attributes, CALLBACKS,
+ AccessorConstantDescriptor(Handle<Name> key, Handle<Object> foreign,
+ PropertyAttributes attributes)
+ : Descriptor(key, foreign, attributes, ACCESSOR_CONSTANT,
Representation::Tagged()) {}
};
-class LookupResult FINAL BASE_EMBEDDED {
- public:
- explicit LookupResult(Isolate* isolate)
- : isolate_(isolate),
- next_(isolate->top_lookup_result()),
- lookup_type_(NOT_FOUND),
- holder_(NULL),
- transition_(NULL),
- details_(NONE, FIELD, Representation::None()) {
- isolate->set_top_lookup_result(this);
- }
-
- ~LookupResult() {
- DCHECK(isolate()->top_lookup_result() == this);
- isolate()->set_top_lookup_result(next_);
- }
-
- Isolate* isolate() const { return isolate_; }
-
- void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
- lookup_type_ = DESCRIPTOR_TYPE;
- holder_ = holder;
- transition_ = NULL;
- details_ = details;
- number_ = number;
- }
-
- void TransitionResult(JSObject* holder, Map* target) {
- lookup_type_ = TRANSITION_TYPE;
- number_ = target->LastAdded();
- details_ = target->instance_descriptors()->GetDetails(number_);
- holder_ = holder;
- transition_ = target;
- }
-
- void NotFound() {
- lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, FIELD, 0);
- holder_ = NULL;
- transition_ = NULL;
- }
-
- Representation representation() const {
- DCHECK(IsFound());
- return details_.representation();
- }
-
- // Property callbacks does not include transitions to callbacks.
- bool IsPropertyCallbacks() const {
- return !IsTransition() && details_.type() == CALLBACKS;
- }
-
- bool IsReadOnly() const {
- DCHECK(IsFound());
- return details_.IsReadOnly();
- }
-
- bool IsField() const {
- return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == FIELD;
- }
-
- bool IsConstant() const {
- return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == CONSTANT;
- }
-
- bool IsConfigurable() const { return details_.IsConfigurable(); }
- bool IsFound() const { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
-
- // Is the result is a property excluding transitions and the null descriptor?
- bool IsProperty() const {
- return IsFound() && !IsTransition();
- }
-
- Map* GetTransitionTarget() const {
- DCHECK(IsTransition());
- return transition_;
- }
-
- bool IsTransitionToField() const {
- return IsTransition() && details_.type() == FIELD;
- }
-
- int GetLocalFieldIndexFromMap(Map* map) const {
- return GetFieldIndexFromMap(map) - map->inobject_properties();
- }
-
- Object* GetConstantFromMap(Map* map) const {
- DCHECK(details_.type() == CONSTANT);
- return GetValueFromMap(map);
- }
-
- Object* GetValueFromMap(Map* map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->instance_descriptors()->GetValue(number_);
- }
-
- int GetFieldIndexFromMap(Map* map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->instance_descriptors()->GetFieldIndex(number_);
- }
-
- HeapType* GetFieldTypeFromMap(Map* map) const {
- DCHECK_NE(NOT_FOUND, lookup_type_);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->instance_descriptors()->GetFieldType(number_);
- }
-
- Map* GetFieldOwnerFromMap(Map* map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->FindFieldOwner(number_);
- }
-
- void Iterate(ObjectVisitor* visitor);
-
- private:
- Isolate* isolate_;
- LookupResult* next_;
-
- // Where did we find the result;
- enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
-
- JSReceiver* holder_;
- Map* transition_;
- int number_;
- PropertyDetails details_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const LookupResult& r);
} } // namespace v8::internal
#endif // V8_PROPERTY_H_
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 469fb8cbb3..5574730a57 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -16,14 +16,15 @@ namespace internal {
#ifdef V8_INTERPRETED_REGEXP
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer,
+RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Isolate* isolate,
+ Vector<byte> buffer,
Zone* zone)
- : RegExpMacroAssembler(zone),
+ : RegExpMacroAssembler(isolate, zone),
buffer_(buffer),
pc_(0),
own_buffer_(false),
advance_current_end_(kInvalidPC),
- isolate_(zone->isolate()) { }
+ isolate_(isolate) {}
RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index b192c22b6f..781defc297 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -27,7 +27,8 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- RegExpMacroAssemblerIrregexp(Vector<byte>, Zone* zone);
+ RegExpMacroAssemblerIrregexp(Isolate* isolate, Vector<byte> buffer,
+ Zone* zone);
virtual ~RegExpMacroAssemblerIrregexp();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index 14da2da895..71d0b9b0f7 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -12,9 +12,8 @@ namespace v8 {
namespace internal {
RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
- RegExpMacroAssembler* assembler) :
- RegExpMacroAssembler(assembler->zone()),
- assembler_(assembler) {
+ Isolate* isolate, RegExpMacroAssembler* assembler)
+ : RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) {
unsigned int type = assembler->Implementation();
DCHECK(type < 6);
const char* impl_names[] = {"IA32", "ARM", "ARM64",
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index ac76cfdb06..67b1710e45 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -11,7 +11,7 @@ namespace internal {
// Decorator on a RegExpMacroAssembler that write all calls.
class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
public:
- explicit RegExpMacroAssemblerTracer(RegExpMacroAssembler* assembler);
+ RegExpMacroAssemblerTracer(Isolate* isolate, RegExpMacroAssembler* assembler);
virtual ~RegExpMacroAssemblerTracer();
virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 52df648d9a..90ac1b9fa3 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -13,11 +13,11 @@
namespace v8 {
namespace internal {
-RegExpMacroAssembler::RegExpMacroAssembler(Zone* zone)
- : slow_safe_compiler_(false),
- global_mode_(NOT_GLOBAL),
- zone_(zone) {
-}
+RegExpMacroAssembler::RegExpMacroAssembler(Isolate* isolate, Zone* zone)
+ : slow_safe_compiler_(false),
+ global_mode_(NOT_GLOBAL),
+ isolate_(isolate),
+ zone_(zone) {}
RegExpMacroAssembler::~RegExpMacroAssembler() {
@@ -26,9 +26,9 @@ RegExpMacroAssembler::~RegExpMacroAssembler() {
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
-NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Zone* zone)
- : RegExpMacroAssembler(zone) {
-}
+NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Isolate* isolate,
+ Zone* zone)
+ : RegExpMacroAssembler(isolate, zone) {}
NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index f72cc4d42d..60b83a4347 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -32,6 +32,7 @@ class RegExpMacroAssembler {
kARMImplementation,
kARM64Implementation,
kMIPSImplementation,
+ kPPCImplementation,
kX64Implementation,
kX87Implementation,
kBytecodeImplementation
@@ -42,7 +43,7 @@ class RegExpMacroAssembler {
kCheckStackLimit = true
};
- explicit RegExpMacroAssembler(Zone* zone);
+ RegExpMacroAssembler(Isolate* isolate, Zone* zone);
virtual ~RegExpMacroAssembler();
// The maximal number of pushes between stack checks. Users must supply
// kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
@@ -157,11 +158,13 @@ class RegExpMacroAssembler {
return global_mode_ == GLOBAL;
}
+ Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
private:
bool slow_safe_compiler_;
bool global_mode_;
+ Isolate* isolate_;
Zone* zone_;
};
@@ -184,7 +187,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
// capture positions.
enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
- explicit NativeRegExpMacroAssembler(Zone* zone);
+ NativeRegExpMacroAssembler(Isolate* isolate, Zone* zone);
virtual ~NativeRegExpMacroAssembler();
virtual bool CanReadUnaligned();
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index e1eac76c12..416f5865e2 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -22,6 +22,8 @@ function DoConstructRegExp(object, pattern, flags) {
flags = (pattern.global ? 'g' : '')
+ (pattern.ignoreCase ? 'i' : '')
+ (pattern.multiline ? 'm' : '');
+ if (harmony_unicode_regexps)
+ flags += (pattern.unicode ? 'u' : '');
if (harmony_regexps)
flags += (pattern.sticky ? 'y' : '');
pattern = pattern.source;
@@ -235,6 +237,7 @@ function RegExpToString() {
if (this.global) result += 'g';
if (this.ignoreCase) result += 'i';
if (this.multiline) result += 'm';
+ if (harmony_unicode_regexps && this.unicode) result += 'u';
if (harmony_regexps && this.sticky) result += 'y';
return result;
}
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index c52051aba3..c81950e8ed 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -15,13 +15,14 @@ namespace internal {
class Processor: public AstVisitor {
public:
- Processor(Variable* result, AstValueFactory* ast_value_factory)
+ Processor(Isolate* isolate, Variable* result,
+ AstValueFactory* ast_value_factory)
: result_(result),
result_assigned_(false),
is_set_(false),
in_try_(false),
factory_(ast_value_factory) {
- InitializeAstVisitor(ast_value_factory->zone());
+ InitializeAstVisitor(isolate, ast_value_factory->zone());
}
virtual ~Processor() { }
@@ -205,7 +206,6 @@ void Processor::VisitModuleDeclaration(ModuleDeclaration* node) {}
void Processor::VisitImportDeclaration(ImportDeclaration* node) {}
void Processor::VisitExportDeclaration(ExportDeclaration* node) {}
void Processor::VisitModuleLiteral(ModuleLiteral* node) {}
-void Processor::VisitModuleVariable(ModuleVariable* node) {}
void Processor::VisitModulePath(ModulePath* node) {}
void Processor::VisitModuleUrl(ModuleUrl* node) {}
void Processor::VisitEmptyStatement(EmptyStatement* node) {}
@@ -235,7 +235,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
scope->NewTemporary(info->ast_value_factory()->dot_result_string());
// The name string must be internalized at this point.
DCHECK(!result->name().is_null());
- Processor processor(result, info->ast_value_factory());
+ Processor processor(info->isolate(), result, info->ast_value_factory());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 6c99714593..109c1a52a9 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -113,11 +113,7 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
int loop_nesting_levels) {
SharedFunctionInfo* shared = function->shared();
- // See AlwaysFullCompiler (in compiler.cc) comment on why we need
- // Debug::has_break_points().
- if (!FLAG_use_osr ||
- isolate_->DebuggerHasBreakPoints() ||
- function->IsBuiltin()) {
+ if (!FLAG_use_osr || function->IsBuiltin()) {
return;
}
@@ -147,7 +143,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
- if (!isolate_->use_crankshaft() || isolate_->DebuggerHasBreakPoints()) return;
+ if (!isolate_->use_crankshaft()) return;
DisallowHeapAllocation no_gc;
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 978429ea3b..7d82dfa846 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -312,8 +312,8 @@ function SHR(y) {
*/
// ECMA-262, section 11.4.1, page 46.
-function DELETE(key, strict) {
- return %DeleteProperty(%ToObject(this), %ToName(key), strict);
+function DELETE(key, language_mode) {
+ return %DeleteProperty(%ToObject(this), %ToName(key), language_mode);
}
@@ -377,7 +377,9 @@ function FILTER_KEY(key) {
function CALL_NON_FUNCTION() {
var delegate = %GetFunctionDelegate(this);
if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof this]);
+ var callsite = %RenderCallSite();
+ if (callsite == "") callsite = typeof this;
+ throw %MakeTypeError('called_non_callable', [callsite]);
}
return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
}
@@ -386,7 +388,9 @@ function CALL_NON_FUNCTION() {
function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
var delegate = %GetConstructorDelegate(this);
if (!IS_FUNCTION(delegate)) {
- throw %MakeTypeError('called_non_callable', [typeof this]);
+ var callsite = %RenderCallSite();
+ if (callsite == "") callsite = typeof this;
+ throw %MakeTypeError('called_non_callable', [callsite]);
}
return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
}
@@ -468,6 +472,12 @@ function TO_STRING() {
}
+// Convert the receiver to a string or symbol - forward to ToName.
+function TO_NAME() {
+ return %ToName(this);
+}
+
+
/* -------------------------------------
- - - C o n v e r s i o n s - - -
-------------------------------------
diff --git a/deps/v8/src/runtime/runtime-api.cc b/deps/v8/src/runtime/runtime-api.cc
deleted file mode 100644
index 740832e9c9..0000000000
--- a/deps/v8/src/runtime/runtime-api.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/arguments.h"
-#include "src/bootstrapper.h"
-#include "src/runtime/runtime.h"
-#include "src/runtime/runtime-utils.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_CreateApiFunction) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- return *isolate->factory()->CreateApiFunction(data, prototype);
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsTemplate) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg, 0);
- bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
- return isolate->heap()->ToBoolean(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetTemplateField) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(HeapObject, templ, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- int offset = index * kPointerSize + HeapObject::kHeaderSize;
- InstanceType type = templ->map()->instance_type();
- RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
- type == OBJECT_TEMPLATE_INFO_TYPE);
- RUNTIME_ASSERT(offset > 0);
- if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
- RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
- } else {
- RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
- }
- return *HeapObject::RawField(templ, offset);
-}
-
-
-// Transform getter or setter into something DefineAccessor can handle.
-static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
- Handle<Object> component) {
- if (component->IsUndefined()) return isolate->factory()->undefined_value();
- Handle<FunctionTemplateInfo> info =
- Handle<FunctionTemplateInfo>::cast(component);
- return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction());
-}
-
-
-RUNTIME_FUNCTION(Runtime_DefineApiAccessorProperty) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
- CONVERT_SMI_ARG_CHECKED(attribute, 4);
- RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo());
- RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo());
- RUNTIME_ASSERT(PropertyDetails::AttributesField::is_valid(
- static_cast<PropertyAttributes>(attribute)));
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::DefineAccessor(
- object, name, InstantiateAccessorComponent(isolate, getter),
- InstantiateAccessorComponent(isolate, setter),
- static_cast<PropertyAttributes>(attribute)));
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_AddPropertyForTemplate) {
- HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 4);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
- RUNTIME_ASSERT(
- (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- // Compute attributes.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(unchecked_attributes);
-
-#ifdef DEBUG
- bool duplicate;
- if (key->IsName()) {
- LookupIterator it(object, Handle<Name>::cast(key),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- DCHECK(maybe.has_value);
- duplicate = it.IsFound();
- } else {
- uint32_t index = 0;
- RUNTIME_ASSERT(key->ToArrayIndex(&index));
- Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
- if (!maybe.has_value) return isolate->heap()->exception();
- duplicate = maybe.value;
- }
- if (duplicate) {
- Handle<Object> args[1] = {key};
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError("duplicate_template_property", HandleVector(args, 1)));
- }
-#endif
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::DefineObjectProperty(object, key, value, attributes));
- return *result;
-}
-}
-} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index a017236a54..6814385183 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -420,7 +420,7 @@ static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
uint32_t length = static_cast<uint32_t>(DoubleToInt32(length_num));
ElementsAccessor* accessor = object->GetElementsAccessor();
for (uint32_t i = 0; i < length; i++) {
- if (accessor->HasElement(object, object, i)) {
+ if (accessor->HasElement(object, i)) {
indices->Add(i);
}
}
@@ -687,7 +687,7 @@ static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
ElementsAccessor* accessor = receiver->GetElementsAccessor();
for (uint32_t index = 0; index < length; index++) {
HandleScope loop_scope(isolate);
- if (accessor->HasElement(receiver, receiver, index)) {
+ if (accessor->HasElement(receiver, index)) {
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, element, accessor->Get(receiver, receiver, index),
@@ -850,7 +850,8 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
}
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS:
- DCHECK_EQ(0, length);
+ case DICTIONARY_ELEMENTS:
+ DCHECK_EQ(0u, length);
break;
default:
UNREACHABLE();
@@ -978,7 +979,7 @@ RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
ElementsAccessor* accessor = array->GetElementsAccessor();
int holes = 0;
for (int i = 0; i < length; i += increment) {
- if (!accessor->HasElement(array, array, i, elements)) {
+ if (!accessor->HasElement(array, i, elements)) {
++holes;
}
}
@@ -1037,6 +1038,7 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
static Object* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
+ Handle<JSFunction> original_constructor,
Handle<AllocationSite> site,
Arguments* caller_args) {
Factory* factory = isolate->factory();
@@ -1108,6 +1110,19 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
// We must mark the allocationsite as un-inlinable.
site->SetDoNotInlineCall();
}
+
+ // Set up the prototoype using original function.
+ // TODO(dslomov): instead of setting the __proto__,
+ // use and cache the correct map.
+ if (*original_constructor != *constructor) {
+ if (original_constructor->has_instance_prototype()) {
+ Handle<Object> prototype =
+ handle(original_constructor->instance_prototype(), isolate);
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSObject::SetPrototype(array, prototype, false));
+ }
+ }
+
return *array;
}
@@ -1141,7 +1156,27 @@ RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
DCHECK(!site->SitePointsToLiteral());
}
- return ArrayConstructorCommon(isolate, constructor, site, caller_args);
+ return ArrayConstructorCommon(isolate, constructor, constructor, site,
+ caller_args);
+}
+
+
+RUNTIME_FUNCTION(Runtime_ArrayConstructorWithSubclassing) {
+ HandleScope scope(isolate);
+ int args_length = args.length();
+ CHECK(args_length >= 2);
+
+ // This variables and checks work around -Werror=strict-overflow.
+ int pre_last_arg_index = args_length - 2;
+ int last_arg_index = args_length - 1;
+ CHECK(pre_last_arg_index >= 0);
+ CHECK(last_arg_index >= 0);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, pre_last_arg_index);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, original_constructor, last_arg_index);
+ Arguments caller_args(args_length - 2, args.arguments());
+ return ArrayConstructorCommon(isolate, constructor, original_constructor,
+ Handle<AllocationSite>::null(), &caller_args);
}
@@ -1160,7 +1195,7 @@ RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
DCHECK(arg_count == caller_args->length());
}
#endif
- return ArrayConstructorCommon(isolate, constructor,
+ return ArrayConstructorCommon(isolate, constructor, constructor,
Handle<AllocationSite>::null(), caller_args);
}
@@ -1170,7 +1205,8 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
RUNTIME_ASSERT(!array->HasExternalArrayElements() &&
- !array->HasFixedTypedArrayElements());
+ !array->HasFixedTypedArrayElements() &&
+ !array->IsJSGlobalProxy());
JSObject::NormalizeElements(array);
return *array;
}
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 7c827f0bd9..e88a76ac9e 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -38,6 +38,24 @@ RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
}
+RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("constructor_noncallable", HandleVector<Object>(NULL, 0)));
+}
+
+
+RUNTIME_FUNCTION(Runtime_ThrowArrayNotSubclassableError) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("array_not_subclassable", HandleVector<Object>(NULL, 0)));
+}
+
+
RUNTIME_FUNCTION(Runtime_ToMethod) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -107,6 +125,11 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
: isolate->factory()->empty_string();
constructor->shared()->set_name(*name_string);
+ if (!super_class->IsTheHole()) {
+ Handle<Code> stub(isolate->builtins()->JSConstructStubForDerived());
+ constructor->shared()->set_construct_stub(*stub);
+ }
+
JSFunction::SetPrototype(constructor, prototype);
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -153,66 +176,23 @@ RUNTIME_FUNCTION(Runtime_DefineClassMethod) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
uint32_t index;
- if (key->ToArrayIndex(&index)) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnElement(object, index, function, STRICT));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Runtime::ToName(isolate, key));
if (name->AsArrayIndex(&index)) {
RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnElement(object, index, function, STRICT));
+ isolate,
+ JSObject::SetOwnElement(object, index, function, DONT_ENUM, STRICT));
} else {
RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(object, name, function, NONE));
+ isolate, JSObject::SetOwnPropertyIgnoreAttributes(object, name,
+ function, DONT_ENUM));
}
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DefineClassGetter) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Runtime::ToName(isolate, key));
- RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::DefineAccessor(object, name, getter,
- isolate->factory()->null_value(), NONE));
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_DefineClassSetter) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Runtime::ToName(isolate, key));
- RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::DefineAccessor(object, name, isolate->factory()->null_value(),
- setter, NONE));
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_ClassGetSourceCode) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -325,7 +305,7 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, StrictMode strict_mode) {
+ Handle<Object> value, LanguageMode language_mode) {
if (home_object->IsAccessCheckNeeded() &&
!isolate->MayNamedAccess(home_object, name, v8::ACCESS_SET)) {
isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_SET);
@@ -340,9 +320,8 @@ static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Object::SetProperty(&it, value, strict_mode,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED,
- Object::SUPER_PROPERTY));
+ Object::SetSuperProperty(&it, value, language_mode,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED));
return *result;
}
@@ -351,7 +330,7 @@ static Object* StoreElementToSuper(Isolate* isolate,
Handle<JSObject> home_object,
Handle<Object> receiver, uint32_t index,
Handle<Object> value,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
if (home_object->IsAccessCheckNeeded() &&
!isolate->MayIndexedAccess(home_object, index, v8::ACCESS_SET)) {
isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_SET);
@@ -366,7 +345,7 @@ static Object* StoreElementToSuper(Isolate* isolate,
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
Object::SetElementWithReceiver(isolate, proto, receiver, index, value,
- strict_mode));
+ language_mode));
return *result;
}
@@ -397,21 +376,23 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
static Object* StoreKeyedToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> receiver, Handle<Object> key,
- Handle<Object> value, StrictMode strict_mode) {
+ Handle<Object> value,
+ LanguageMode language_mode) {
uint32_t index;
if (key->ToArrayIndex(&index)) {
return StoreElementToSuper(isolate, home_object, receiver, index, value,
- strict_mode);
+ language_mode);
}
Handle<Name> name;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
Runtime::ToName(isolate, key));
if (name->AsArrayIndex(&index)) {
return StoreElementToSuper(isolate, home_object, receiver, index, value,
- strict_mode);
+ language_mode);
}
- return StoreToSuper(isolate, home_object, receiver, name, value, strict_mode);
+ return StoreToSuper(isolate, home_object, receiver, name, value,
+ language_mode);
}
@@ -439,50 +420,22 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
}
-RUNTIME_FUNCTION(Runtime_DefaultConstructorSuperCall) {
+RUNTIME_FUNCTION(Runtime_HandleStepInForDerivedConstructors) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
-
- // Compute the frame holding the arguments.
- JavaScriptFrameIterator it(isolate);
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
-
- Handle<JSFunction> function(frame->function(), isolate);
- Handle<Object> receiver(frame->receiver(), isolate);
-
- Handle<Object> proto_function;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, proto_function,
- Runtime::GetPrototype(isolate, function));
-
- // Get the actual number of provided arguments.
- const int argc = frame->ComputeParametersCount();
-
- // Loose upper bound to allow fuzzing. We'll most likely run out of
- // stack space before hitting this limit.
- static int kMaxArgc = 1000000;
- RUNTIME_ASSERT(argc >= 0 && argc <= kMaxArgc);
-
- // If there are too many arguments, allocate argv via malloc.
- const int argv_small_size = 10;
- Handle<Object> argv_small_buffer[argv_small_size];
- SmartArrayPointer<Handle<Object> > argv_large_buffer;
- Handle<Object>* argv = argv_small_buffer;
- if (argc > argv_small_size) {
- argv = new Handle<Object>[argc];
- if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Debug* debug = isolate->debug();
+ // Handle stepping into constructors if step into is active.
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(function, Handle<Object>::null(), 0, true);
}
+ return *isolate->factory()->undefined_value();
+}
- for (int i = 0; i < argc; ++i) {
- argv[i] = handle(frame->GetParameter(i), isolate);
- }
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, proto_function, receiver, argc, argv, false));
- return *result;
+RUNTIME_FUNCTION(RuntimeReference_DefaultConstructorCallSuper) {
+ UNREACHABLE();
+ return nullptr;
}
}
} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index abdd056998..ffffbdd2f2 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -296,12 +296,11 @@ RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
}
-static Handle<JSWeakCollection> WeakCollectionInitialize(
+void Runtime::WeakCollectionInitialize(
Isolate* isolate, Handle<JSWeakCollection> weak_collection) {
DCHECK(weak_collection->map()->inobject_properties() == 0);
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
weak_collection->set_table(*table);
- return weak_collection;
}
@@ -309,7 +308,8 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- return *WeakCollectionInitialize(isolate, weak_collection);
+ Runtime::WeakCollectionInitialize(isolate, weak_collection);
+ return *weak_collection;
}
@@ -341,6 +341,24 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
}
+bool Runtime::WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key) {
+ DCHECK(key->IsJSReceiver() || key->IsSymbol());
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ DCHECK(table->IsKey(*key));
+ bool was_present = false;
+ Handle<ObjectHashTable> new_table =
+ ObjectHashTable::Remove(table, key, &was_present);
+ weak_collection->set_table(*new_table);
+ if (*table != *new_table) {
+ // Zap the old table since we didn't record slots for its elements.
+ table->FillWithHoles(0, table->length());
+ }
+ return was_present;
+}
+
+
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -350,15 +368,23 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- bool was_present = false;
- Handle<ObjectHashTable> new_table =
- ObjectHashTable::Remove(table, key, &was_present);
+ bool was_present = Runtime::WeakCollectionDelete(weak_collection, key);
+ return isolate->heap()->ToBoolean(was_present);
+}
+
+
+void Runtime::WeakCollectionSet(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, Handle<Object> value) {
+ DCHECK(key->IsJSReceiver() || key->IsSymbol());
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ DCHECK(table->IsKey(*key));
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
weak_collection->set_table(*new_table);
if (*table != *new_table) {
// Zap the old table since we didn't record slots for its elements.
table->FillWithHoles(0, table->length());
}
- return isolate->heap()->ToBoolean(was_present);
}
@@ -372,12 +398,7 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
- weak_collection->set_table(*new_table);
- if (*table != *new_table) {
- // Zap the old table since we didn't record slots for its elements.
- table->FillWithHoles(0, table->length());
- }
+ Runtime::WeakCollectionSet(weak_collection, key, value);
return *weak_collection;
}
@@ -414,14 +435,9 @@ RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
- // TODO(adamk): Currently this runtime function is only called three times per
- // isolate. If it's called more often, the map should be moved into the
- // strong root list.
- Handle<Map> map =
- isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
- Handle<JSWeakMap> weakmap =
- Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
- return *WeakCollectionInitialize(isolate, weakmap);
+ Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
+ Runtime::WeakCollectionInitialize(isolate, weakmap);
+ return *weakmap;
}
}
} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index ebd0c13f0f..0958da13a1 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -49,29 +49,25 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized) {
CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
DCHECK(isolate->use_crankshaft());
- Handle<Code> unoptimized(function->shared()->code());
- if (function->shared()->optimization_disabled() ||
- isolate->DebuggerHasBreakPoints()) {
- // If the function is not optimizable or debugger is active continue
- // using the code from the full compiler.
- if (FLAG_trace_opt) {
- PrintF("[failed to optimize ");
- function->PrintName();
- PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
- function->shared()->optimization_disabled() ? "F" : "T",
- isolate->DebuggerHasBreakPoints() ? "T" : "F");
- }
- function->ReplaceCode(*unoptimized);
- return function->code();
- }
-
Compiler::ConcurrencyMode mode =
concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
Handle<Code> code;
+ Handle<Code> unoptimized(function->shared()->code());
if (Compiler::GetOptimizedCode(function, unoptimized, mode).ToHandle(&code)) {
+ // Optimization succeeded, return optimized code.
function->ReplaceCode(*code);
} else {
- function->ReplaceCode(function->shared()->code());
+ // Optimization failed, get unoptimized code.
+ if (isolate->has_pending_exception()) { // Possible stack overflow.
+ return isolate->heap()->exception();
+ }
+ code = Handle<Code>(function->shared()->code(), isolate);
+ if (code->kind() != Code::FUNCTION &&
+ code->kind() != Code::OPTIMIZED_FUNCTION) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, code, Compiler::GetUnoptimizedCode(function));
+ }
+ function->ReplaceCode(*code);
}
DCHECK(function->code()->kind() == Code::FUNCTION ||
@@ -290,8 +286,15 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
// match. Fix heuristics for reenabling optimizations!
function->shared()->increment_deopt_count();
- // TODO(titzer): Do not install code into the function.
- function->ReplaceCode(*result);
+ if (result->is_turbofanned()) {
+ // TurboFanned OSR code cannot be installed into the function.
+ // But the function is obviously hot, so optimize it next time.
+ function->ReplaceCode(
+ isolate->builtins()->builtin(Builtins::kCompileOptimized));
+ } else {
+ // Crankshafted OSR code can be installed into the function.
+ function->ReplaceCode(*result);
+ }
return *result;
}
}
@@ -391,7 +394,7 @@ RUNTIME_FUNCTION(Runtime_CompileString) {
static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
Handle<Object> receiver,
- StrictMode strict_mode,
+ LanguageMode language_mode,
int scope_position) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> native_context = Handle<Context>(context->native_context());
@@ -415,7 +418,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<JSFunction> compiled;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, compiled,
- Compiler::GetFunctionFromEval(source, outer_info, context, strict_mode,
+ Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
restriction, scope_position),
MakePair(isolate->heap()->exception(), NULL));
return MakePair(*compiled, *receiver);
@@ -439,13 +442,13 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) {
}
DCHECK(args[4]->IsSmi());
- DCHECK(args.smi_at(4) == SLOPPY || args.smi_at(4) == STRICT);
- StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(4));
+ DCHECK(is_valid_language_mode(args.smi_at(4)));
+ LanguageMode language_mode = static_cast<LanguageMode>(args.smi_at(4));
DCHECK(args[5]->IsSmi());
Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
isolate);
return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
- args.at<Object>(3), strict_mode, args.smi_at(5));
+ args.at<Object>(3), language_mode, args.smi_at(5));
}
}
} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 65d8fc63ac..4caf27de0f 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -70,7 +70,7 @@ RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
- if (FLAG_log_timer_events) LOG(isolate, CurrentTimeEvent());
+ if (FLAG_log_timer_events || FLAG_prof_cpp) LOG(isolate, CurrentTimeEvent());
// According to ECMA-262, section 15.9.1, page 117, the precision of
// the number in a Date object representing a particular instant in
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 12c5a0d84f..f8c123867a 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -136,7 +136,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
isolate, element_or_char,
Runtime::GetElementOrCharAt(isolate, obj, index));
details->set(0, *element_or_char);
- details->set(1, PropertyDetails(NONE, FIELD, 0).AsSmi());
+ details->set(1, PropertyDetails(NONE, DATA, 0).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
@@ -158,7 +158,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
details->set(0, *value);
// TODO(verwaest): Get rid of this random way of handling interceptors.
PropertyDetails d = it.state() == LookupIterator::INTERCEPTOR
- ? PropertyDetails(NONE, FIELD, 0)
+ ? PropertyDetails(NONE, DATA, 0)
: it.property_details();
details->set(1, d.AsSmi());
details->set(
@@ -246,7 +246,8 @@ RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) {
CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::GetElementWithInterceptor(obj, obj, index));
+ isolate, result,
+ JSObject::GetElementWithInterceptor(obj, obj, index, true));
return *result;
}
@@ -632,7 +633,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
- if (!receiver->IsJSObject() && shared->strict_mode() == SLOPPY &&
+ if (!receiver->IsJSObject() && is_sloppy(shared->language_mode()) &&
!function->IsBuiltin()) {
// If the receiver is not a JSObject and the function is not a
// builtin or strict-mode we have hit an optimization where a
@@ -1207,14 +1208,14 @@ class ScopeIterator {
info.MarkAsEval();
info.SetContext(Handle<Context>(function_->context()));
}
- if (Parser::Parse(&info) && Scope::Analyze(&info)) {
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
RetrieveScopeChain(scope, shared_info);
} else {
// Function code
CompilationInfoWithZone info(shared_info);
- if (Parser::Parse(&info) && Scope::Analyze(&info)) {
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
RetrieveScopeChain(scope, shared_info);
@@ -1490,7 +1491,8 @@ class ScopeIterator {
Handle<SharedFunctionInfo> shared_info) {
if (scope != NULL) {
int source_position = shared_info->code()->SourcePosition(frame_->pc());
- scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
+ scope->GetNestedScopeChain(isolate_, &nested_scope_chain_,
+ source_position);
} else {
// A failed reparse indicates that the preparser has diverged from the
// parser or that the preparse data given to the initial parse has been
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index e25b6592e0..5d49b23681 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -28,7 +28,7 @@ RUNTIME_FUNCTION(Runtime_IsSloppyModeFunction) {
}
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- return isolate->heap()->ToBoolean(shared->strict_mode() == SLOPPY);
+ return isolate->heap()->ToBoolean(is_sloppy(shared->language_mode()));
}
@@ -48,7 +48,7 @@ RUNTIME_FUNCTION(Runtime_GetDefaultReceiver) {
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- if (shared->native() || shared->strict_mode() == STRICT) {
+ if (shared->native() || is_strict(shared->language_mode())) {
return isolate->heap()->undefined_value();
}
// Returns undefined for strict or native functions, or
@@ -271,14 +271,16 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_feedback_vector(source_shared->feedback_vector());
- target_shared->set_formal_parameter_count(
- source_shared->formal_parameter_count());
+ target_shared->set_internal_formal_parameter_count(
+ source_shared->internal_formal_parameter_count());
target_shared->set_script(source_shared->script());
target_shared->set_start_position_and_type(
source_shared->start_position_and_type());
target_shared->set_end_position(source_shared->end_position());
bool was_native = target_shared->native();
target_shared->set_compiler_hints(source_shared->compiler_hints());
+ target_shared->set_opt_count_and_bailout_reason(
+ source_shared->opt_count_and_bailout_reason());
target_shared->set_native(was_native);
target_shared->set_profiler_ticks(source_shared->profiler_ticks());
@@ -326,6 +328,34 @@ RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
}
+RUNTIME_FUNCTION(Runtime_IsConstructor) {
+ HandleScope handles(isolate);
+ RUNTIME_ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+
+ // TODO(caitp): implement this in a better/simpler way, allow inlining via TF
+ if (object->IsJSFunction()) {
+ Handle<JSFunction> func = Handle<JSFunction>::cast(object);
+ bool should_have_prototype = func->should_have_prototype();
+ if (func->shared()->bound()) {
+ Handle<FixedArray> bound_args =
+ Handle<FixedArray>(FixedArray::cast(func->function_bindings()));
+ Handle<Object> bound_function(
+ JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
+ isolate);
+ if (bound_function->IsJSFunction()) {
+ Handle<JSFunction> bound = Handle<JSFunction>::cast(bound_function);
+ DCHECK(!bound->shared()->bound());
+ should_have_prototype = bound->should_have_prototype();
+ }
+ }
+ return isolate->heap()->ToBoolean(should_have_prototype);
+ }
+ return isolate->heap()->false_value();
+}
+
+
RUNTIME_FUNCTION(Runtime_SetInlineBuiltinFlag) {
SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
@@ -355,7 +385,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(Isolate* isolate,
JSFunction* inlined_function = functions[inlined_jsframe_index];
SlotRefValueBuilder slot_refs(
frame, inlined_jsframe_index,
- inlined_function->shared()->formal_parameter_count());
+ inlined_function->shared()->internal_formal_parameter_count());
int args_count = slot_refs.args_length();
@@ -435,8 +465,7 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
for (int j = 0; j < argc; j++, i++) {
new_bindings->set(i, *arguments[j + 1]);
}
- new_bindings->set_map_no_write_barrier(
- isolate->heap()->fixed_cow_array_map());
+ new_bindings->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
bound_function->set_function_bindings(*new_bindings);
// Update length. Have to remove the prototype first so that map migration
@@ -462,8 +491,8 @@ RUNTIME_FUNCTION(Runtime_BoundFunctionGetBindings) {
if (callable->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
if (function->shared()->bound()) {
+ RUNTIME_ASSERT(function->function_bindings()->IsFixedArray());
Handle<FixedArray> bindings(function->function_bindings());
- RUNTIME_ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
return *isolate->factory()->NewJSArrayWithElements(bindings);
}
}
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 94d9f423b0..5e01651544 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -6,6 +6,7 @@
#ifdef V8_I18N_SUPPORT
#include "src/v8.h"
+#include "src/api-natives.h"
#include "src/arguments.h"
#include "src/i18n.h"
#include "src/runtime/runtime-utils.h"
@@ -317,7 +318,7 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, local_object,
- Execution::InstantiateObject(date_format_template));
+ ApiNatives::InstantiateObject(date_format_template));
// Set date time formatter as internal field of the resulting JS object.
icu::SimpleDateFormat* date_format =
@@ -412,7 +413,7 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, local_object,
- Execution::InstantiateObject(number_format_template));
+ ApiNatives::InstantiateObject(number_format_template));
// Set number formatter as internal field of the resulting JS object.
icu::DecimalFormat* number_format =
@@ -517,7 +518,7 @@ RUNTIME_FUNCTION(Runtime_CreateCollator) {
// Create an empty object wrapper.
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object, Execution::InstantiateObject(collator_template));
+ isolate, local_object, ApiNatives::InstantiateObject(collator_template));
// Set collator as internal field of the resulting JS object.
icu::Collator* collator =
@@ -616,7 +617,7 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
Handle<JSObject> local_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, local_object,
- Execution::InstantiateObject(break_iterator_template));
+ ApiNatives::InstantiateObject(break_iterator_template));
// Set break iterator as internal field of the resulting JS object.
icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 79dfaced96..50b61921f5 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -7,6 +7,9 @@
#include "src/arguments.h"
#include "src/bootstrapper.h"
#include "src/debug.h"
+#include "src/messages.h"
+#include "src/parser.h"
+#include "src/prettyprinter.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -153,6 +156,36 @@ RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
}
+RUNTIME_FUNCTION(Runtime_RenderCallSite) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ MessageLocation location;
+ isolate->ComputeLocation(&location);
+ if (location.start_pos() == -1) return isolate->heap()->empty_string();
+
+ Zone zone;
+ if (location.function()->shared()->is_function()) {
+ CompilationInfo info(location.function(), &zone);
+ if (!Parser::ParseStatic(&info)) {
+ isolate->clear_pending_exception();
+ return isolate->heap()->empty_string();
+ }
+ CallPrinter printer(isolate, &zone);
+ const char* string = printer.Print(info.function(), location.start_pos());
+ return *isolate->factory()->NewStringFromAsciiChecked(string);
+ }
+
+ CompilationInfo info(location.script(), &zone);
+ if (!Parser::ParseStatic(&info)) {
+ isolate->clear_pending_exception();
+ return isolate->heap()->empty_string();
+ }
+ CallPrinter printer(isolate, &zone);
+ const char* string = printer.Print(info.function(), location.start_pos());
+ return *isolate->factory()->NewStringFromAsciiChecked(string);
+}
+
+
RUNTIME_FUNCTION(Runtime_GetFromCache) {
SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 6397ad15de..68dfa49af8 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -60,19 +60,15 @@ RUNTIME_FUNCTION(Runtime_ConstructDouble) {
RUNTIME_FUNCTION(Runtime_RemPiO2) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1);
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_gc;
+ DCHECK(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- Factory* factory = isolate->factory();
- double y[2] = {0.0, 0.0};
- int n = fdlibm::rempio2(x, y);
- Handle<FixedArray> array = factory->NewFixedArray(3);
- Handle<HeapNumber> y0 = factory->NewHeapNumber(y[0]);
- Handle<HeapNumber> y1 = factory->NewHeapNumber(y[1]);
- array->set(0, Smi::FromInt(n));
- array->set(1, *y0);
- array->set(2, *y1);
- return *factory->NewJSArrayWithElements(array);
+ CONVERT_ARG_CHECKED(JSTypedArray, result, 1);
+ RUNTIME_ASSERT(result->byte_length() == Smi::FromInt(2 * sizeof(double)));
+ void* backing_store = JSArrayBuffer::cast(result->buffer())->backing_store();
+ double* y = static_cast<double*>(backing_store);
+ return Smi::FromInt(fdlibm::rempio2(x, y));
}
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index bc0bb3656d..a7a15e476b 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -229,8 +229,9 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- double value = StringToDouble(isolate->unicode_cache(), subject,
- ALLOW_TRAILING_JUNK, base::OS::nan_value());
+ double value =
+ StringToDouble(isolate->unicode_cache(), subject, ALLOW_TRAILING_JUNK,
+ std::numeric_limits<double>::quiet_NaN());
return *isolate->factory()->NewNumber(value);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 407f237794..96d9331038 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -99,7 +99,7 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
- StrictMode strict_mode) {
+ LanguageMode language_mode) {
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = {key, object};
THROW_NEW_ERROR(isolate, NewTypeError("non_object_property_store",
@@ -117,7 +117,7 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
Handle<Name> name = Handle<Name>::cast(name_object);
return Object::SetProperty(Handle<JSProxy>::cast(object), name, value,
- strict_mode);
+ language_mode);
}
// Check if the given key is an array index.
@@ -148,7 +148,7 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
MaybeHandle<Object> result = JSObject::SetElement(
- js_object, index, value, NONE, strict_mode, true, SET_PROPERTY);
+ js_object, index, value, NONE, language_mode, true, SET_PROPERTY);
JSObject::ValidateElements(js_object);
return result.is_null() ? result : value;
@@ -166,11 +166,11 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
isolate, value, Execution::ToNumber(isolate, value), Object);
}
}
- return JSObject::SetElement(js_object, index, value, NONE, strict_mode,
+ return JSObject::SetElement(js_object, index, value, NONE, language_mode,
true, SET_PROPERTY);
} else {
if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
- return Object::SetProperty(object, name, value, strict_mode);
+ return Object::SetProperty(object, name, value, language_mode);
}
}
@@ -184,17 +184,17 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
// TODO(verwaest): Support non-JSObject receivers.
if (!object->IsJSObject()) return value;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- return JSObject::SetElement(js_object, index, value, NONE, strict_mode,
+ return JSObject::SetElement(js_object, index, value, NONE, language_mode,
true, SET_PROPERTY);
}
- return Object::SetProperty(object, name, value, strict_mode);
+ return Object::SetProperty(object, name, value, language_mode);
}
MaybeHandle<Object> Runtime::DefineObjectProperty(Handle<JSObject> js_object,
Handle<Object> key,
Handle<Object> value,
- PropertyAttributes attr) {
+ PropertyAttributes attrs) {
Isolate* isolate = js_object->GetIsolate();
// Check if the given key is an array index.
uint32_t index;
@@ -210,19 +210,19 @@ MaybeHandle<Object> Runtime::DefineObjectProperty(Handle<JSObject> js_object,
return value;
}
- return JSObject::SetElement(js_object, index, value, attr, SLOPPY, false,
+ return JSObject::SetElement(js_object, index, value, attrs, SLOPPY, false,
DEFINE_PROPERTY);
}
if (key->IsName()) {
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, SLOPPY, false,
+ return JSObject::SetElement(js_object, index, value, attrs, SLOPPY, false,
DEFINE_PROPERTY);
} else {
if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
- attr);
+ attrs);
}
}
@@ -233,11 +233,11 @@ MaybeHandle<Object> Runtime::DefineObjectProperty(Handle<JSObject> js_object,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, SLOPPY, false,
+ return JSObject::SetElement(js_object, index, value, attrs, SLOPPY, false,
DEFINE_PROPERTY);
} else {
return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
- attr);
+ attrs);
}
}
@@ -601,38 +601,12 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
DisallowHeapAllocation no_allocation;
Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
Handle<Name> key = Handle<Name>::cast(key_obj);
- if (receiver->HasFastProperties()) {
- // Attempt to use lookup cache.
- Handle<Map> receiver_map(receiver->map(), isolate);
- KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
- int index = keyed_lookup_cache->Lookup(receiver_map, key);
- if (index != -1) {
- // Doubles are not cached, so raw read the value.
- return receiver->RawFastPropertyAt(
- FieldIndex::ForKeyedLookupCacheIndex(*receiver_map, index));
- }
- // Lookup cache miss. Perform lookup and update the cache if
- // appropriate.
- LookupIterator it(receiver, key, LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA &&
- it.property_details().type() == FIELD) {
- FieldIndex field_index = it.GetFieldIndex();
- // Do not track double fields in the keyed lookup cache. Reading
- // double values requires boxing.
- if (!it.representation().IsDouble()) {
- keyed_lookup_cache->Update(receiver_map, key,
- field_index.GetKeyedLookupCacheIndex());
- }
- AllowHeapAllocation allow_allocation;
- return *JSObject::FastPropertyAt(receiver, it.representation(),
- field_index);
- }
- } else {
+ if (!receiver->HasFastProperties()) {
// Attempt dictionary lookup.
NameDictionary* dictionary = receiver->property_dictionary();
int entry = dictionary->FindEntry(key);
if ((entry != NameDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).type() == FIELD)) {
+ (dictionary->DetailsAt(entry).type() == DATA)) {
Object* value = dictionary->ValueAt(entry);
if (!receiver->IsGlobalObject()) return value;
value = PropertyCell::cast(value)->value();
@@ -690,12 +664,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
- RUNTIME_ASSERT(
- (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- // Compute attributes.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(unchecked_attributes);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
#ifdef DEBUG
uint32_t index = 0;
@@ -709,7 +678,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- JSObject::SetOwnPropertyIgnoreAttributes(object, key, value, attributes));
+ JSObject::SetOwnPropertyIgnoreAttributes(object, key, value, attrs));
return *result;
}
@@ -721,13 +690,13 @@ RUNTIME_FUNCTION(Runtime_SetProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 3);
- StrictMode strict_mode = strict_mode_arg;
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode_arg, 3);
+ LanguageMode language_mode = language_mode_arg;
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
return *result;
}
@@ -741,20 +710,15 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
- RUNTIME_ASSERT(
- (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- // Compute attributes.
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(unchecked_attributes);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
uint32_t index = 0;
key->ToArrayIndex(&index);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetElement(object, index, value, attributes,
- SLOPPY, false, DEFINE_PROPERTY));
+ isolate, result, JSObject::SetElement(object, index, value, attrs, SLOPPY,
+ false, DEFINE_PROPERTY));
return *result;
}
@@ -764,13 +728,10 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty) {
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
- JSReceiver::DeleteMode delete_mode = strict_mode == STRICT
- ? JSReceiver::STRICT_DELETION
- : JSReceiver::NORMAL_DELETION;
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 2);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSReceiver::DeleteProperty(object, key, delete_mode));
+ isolate, result, JSReceiver::DeleteProperty(object, key, language_mode));
return *result;
}
@@ -1066,9 +1027,9 @@ RUNTIME_FUNCTION(Runtime_GetOwnElementNames) {
}
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- int n = obj->NumberOfOwnElements(static_cast<PropertyAttributes>(NONE));
+ int n = obj->NumberOfOwnElements(NONE);
Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
- obj->GetOwnElementKeys(*names, static_cast<PropertyAttributes>(NONE));
+ obj->GetOwnElementKeys(*names, NONE);
return *isolate->factory()->NewJSArrayWithElements(names);
}
@@ -1236,35 +1197,6 @@ RUNTIME_FUNCTION(Runtime_Typeof) {
}
-RUNTIME_FUNCTION(Runtime_Booleanize) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(Object, value_raw, 0);
- CONVERT_SMI_ARG_CHECKED(token_raw, 1);
- intptr_t value = reinterpret_cast<intptr_t>(value_raw);
- Token::Value token = static_cast<Token::Value>(token_raw);
- switch (token) {
- case Token::EQ:
- case Token::EQ_STRICT:
- return isolate->heap()->ToBoolean(value == 0);
- case Token::NE:
- case Token::NE_STRICT:
- return isolate->heap()->ToBoolean(value != 0);
- case Token::LT:
- return isolate->heap()->ToBoolean(value < 0);
- case Token::GT:
- return isolate->heap()->ToBoolean(value > 0);
- case Token::LTE:
- return isolate->heap()->ToBoolean(value <= 0);
- case Token::GTE:
- return isolate->heap()->ToBoolean(value >= 0);
- default:
- // This should only happen during natives fuzzing.
- return isolate->heap()->undefined_value();
- }
-}
-
-
RUNTIME_FUNCTION(Runtime_NewStringWrapper) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -1282,6 +1214,7 @@ RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
static Object* Runtime_NewObjectHelper(Isolate* isolate,
Handle<Object> constructor,
+ Handle<Object> original_constructor,
Handle<AllocationSite> site) {
// If the constructor isn't a proper function we throw a type error.
if (!constructor->IsJSFunction()) {
@@ -1292,6 +1225,11 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
+ CHECK(original_constructor->IsJSFunction());
+ Handle<JSFunction> original_function =
+ Handle<JSFunction>::cast(original_constructor);
+
+
// If function should not have prototype, construction is not allowed. In this
// case generated code bailouts here, since function has no initial_map.
if (!function->should_have_prototype() && !function->shared()->bound()) {
@@ -1334,6 +1272,18 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
result = isolate->factory()->NewJSObjectWithMemento(function, site);
}
+ // Set up the prototoype using original function.
+ // TODO(dslomov): instead of setting the __proto__,
+ // use and cache the correct map.
+ if (*original_function != *function) {
+ if (original_function->has_instance_prototype()) {
+ Handle<Object> prototype =
+ handle(original_function->instance_prototype(), isolate);
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSObject::SetPrototype(result, prototype, false));
+ }
+ }
+
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
@@ -1343,16 +1293,18 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_NewObject) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
- return Runtime_NewObjectHelper(isolate, constructor,
+ CONVERT_ARG_HANDLE_CHECKED(Object, original_constructor, 1);
+ return Runtime_NewObjectHelper(isolate, constructor, original_constructor,
Handle<AllocationSite>::null());
}
RUNTIME_FUNCTION(Runtime_NewObjectWithAllocationSite) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, original_constructor, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, feedback, 0);
Handle<AllocationSite> site;
@@ -1360,7 +1312,8 @@ RUNTIME_FUNCTION(Runtime_NewObjectWithAllocationSite) {
// The feedback can be an AllocationSite or undefined.
site = Handle<AllocationSite>::cast(feedback);
}
- return Runtime_NewObjectHelper(isolate, constructor, site);
+ return Runtime_NewObjectHelper(isolate, constructor, original_constructor,
+ site);
}
@@ -1465,12 +1418,10 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
RUNTIME_ASSERT(IsValidAccessor(getter));
CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
RUNTIME_ASSERT(IsValidAccessor(setter));
- CONVERT_SMI_ARG_CHECKED(unchecked, 4);
- RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 4);
RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::DefineAccessor(obj, name, getter, setter, attr));
+ isolate, JSObject::DefineAccessor(obj, name, getter, setter, attrs));
return isolate->heap()->undefined_value();
}
@@ -1487,9 +1438,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2);
- CONVERT_SMI_ARG_CHECKED(unchecked, 3);
- RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
LookupIterator it(js_object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.IsFound() && it.state() == LookupIterator::ACCESS_CHECK) {
@@ -1508,14 +1457,14 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
JSObject::SetOwnPropertyIgnoreAttributes(
- js_object, name, obj_value, attr, JSObject::DONT_FORCE_FIELD));
+ js_object, name, obj_value, attrs, JSObject::DONT_FORCE_FIELD));
return *result;
}
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::DefineObjectProperty(js_object, name, obj_value, attr));
+ Runtime::DefineObjectProperty(js_object, name, obj_value, attrs));
return *result;
}
@@ -1606,5 +1555,37 @@ RUNTIME_FUNCTION(RuntimeReference_ClassOf) {
if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
return JSReceiver::cast(obj)->class_name();
}
+
+
+RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ JSObject::DefineAccessor(object, name, getter,
+ isolate->factory()->null_value(), attrs));
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate,
+ JSObject::DefineAccessor(object, name, isolate->factory()->null_value(),
+ setter, attrs));
+ return isolate->heap()->undefined_value();
+}
}
} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 9296a4b8a9..57ff1b2f51 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -805,7 +805,7 @@ static JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags,
uint32_t value = JSRegExp::NONE;
int length = flags->length();
// A longer flags string cannot be valid.
- if (length > 4) return JSRegExp::Flags(0);
+ if (length > 5) return JSRegExp::Flags(0);
for (int i = 0; i < length; i++) {
uint32_t flag = JSRegExp::NONE;
switch (flags->Get(i)) {
@@ -818,6 +818,10 @@ static JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags,
case 'm':
flag = JSRegExp::MULTILINE;
break;
+ case 'u':
+ if (!FLAG_harmony_unicode_regexps) return JSRegExp::Flags(0);
+ flag = JSRegExp::UNICODE_ESCAPES;
+ break;
case 'y':
if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
flag = JSRegExp::STICKY;
@@ -859,10 +863,12 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
Handle<Object> ignore_case = factory->ToBoolean(flags.is_ignore_case());
Handle<Object> multiline = factory->ToBoolean(flags.is_multiline());
Handle<Object> sticky = factory->ToBoolean(flags.is_sticky());
+ Handle<Object> unicode = factory->ToBoolean(flags.is_unicode());
Map* map = regexp->map();
Object* constructor = map->constructor();
- if (!FLAG_harmony_regexps && constructor->IsJSFunction() &&
+ if (!FLAG_harmony_regexps && !FLAG_harmony_unicode_regexps &&
+ constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
// Both true and false are immovable immortal objects so no need for write
@@ -896,6 +902,10 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->sticky_string(),
sticky, final).Check();
}
+ if (FLAG_harmony_unicode_regexps) {
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->unicode_string(), unicode, final).Check();
+ }
JSObject::SetOwnPropertyIgnoreAttributes(
regexp, factory->last_index_string(), zero, writable).Check();
}
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 2a0b435872..7eb2e0cfc1 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -65,10 +65,11 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
// Check whether we can reconfigure the existing property into a
// function.
PropertyDetails old_details = it.property_details();
- // TODO(verwaest): CALLBACKS invalidly includes ExecutableAccessInfo,
+ // TODO(verwaest): ACCESSOR_CONSTANT invalidly includes
+ // ExecutableAccessInfo,
// which are actually data properties, not accessor properties.
if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
- old_details.type() == CALLBACKS) {
+ old_details.type() == ACCESSOR_CONSTANT) {
return ThrowRedeclarationError(isolate, name);
}
// If the existing property is not configurable, keep its attributes. Do
@@ -106,7 +107,8 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
bool is_var = initial_value->IsUndefined();
bool is_const = initial_value->IsTheHole();
bool is_function = initial_value->IsSharedFunctionInfo();
- DCHECK(is_var + is_const + is_function == 1);
+ DCHECK_EQ(1,
+ BoolToInt(is_var) + BoolToInt(is_const) + BoolToInt(is_function));
Handle<Object> value;
if (is_function) {
@@ -151,13 +153,13 @@ RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
RUNTIME_ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 1);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<GlobalObject> global(isolate->context()->global_object());
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::SetProperty(global, name, value, strict_mode));
+ isolate, result, Object::SetProperty(global, name, value, language_mode));
return *result;
}
@@ -220,7 +222,8 @@ RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
bool is_var = *initial_value == NULL;
bool is_const = initial_value->IsTheHole();
bool is_function = initial_value->IsJSFunction();
- DCHECK(is_var + is_const + is_function == 1);
+ DCHECK_EQ(1,
+ BoolToInt(is_var) + BoolToInt(is_const) + BoolToInt(is_function));
int index;
PropertyAttributes attributes;
@@ -353,11 +356,13 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
Handle<JSFunction> callee,
Object** parameters,
int argument_count) {
+ CHECK(!IsSubclassConstructor(callee->shared()->kind()));
+ DCHECK(callee->is_simple_parameter_list());
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
// Allocate the elements if needed.
- int parameter_count = callee->shared()->formal_parameter_count();
+ int parameter_count = callee->shared()->internal_formal_parameter_count();
if (argument_count > 0) {
if (parameter_count > 0) {
int mapped_count = Min(argument_count, parameter_count);
@@ -416,6 +421,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
break;
}
}
+
DCHECK(context_index >= 0);
arguments->set_the_hole(index);
parameter_map->set(
@@ -474,7 +480,9 @@ RUNTIME_FUNCTION(Runtime_NewArguments) {
// Determine parameter location on the stack and dispatch on language mode.
int argument_count = frame->GetArgumentsLength();
Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
- return callee->shared()->strict_mode() == STRICT
+
+ return (is_strict(callee->shared()->language_mode()) ||
+ !callee->is_simple_parameter_list())
? *NewStrictArguments(isolate, callee, parameters, argument_count)
: *NewSloppyArguments(isolate, callee, parameters, argument_count);
}
@@ -500,6 +508,51 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
}
+static Handle<JSArray> NewRestParam(Isolate* isolate,
+ Object** parameters,
+ int num_params,
+ int rest_index) {
+ parameters -= rest_index;
+ int num_elements = std::max(0, num_params - rest_index);
+ Handle<FixedArray> elements =
+ isolate->factory()->NewUninitializedFixedArray(num_elements);
+ for (int i = 0; i < num_elements; ++i) {
+ elements->set(i, *--parameters);
+ }
+ return isolate->factory()->NewJSArrayWithElements(elements, FAST_ELEMENTS,
+ num_elements);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewRestParam) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ Object** parameters = reinterpret_cast<Object**>(args[0]);
+ CONVERT_SMI_ARG_CHECKED(num_params, 1);
+ CONVERT_SMI_ARG_CHECKED(rest_index, 2);
+
+ return *NewRestParam(isolate, parameters, num_params, rest_index);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewRestParamSlow) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(rest_index, 0);
+
+ JavaScriptFrameIterator it(isolate);
+
+ // Find the frame that holds the actual arguments passed to the function.
+ it.AdvanceToArgumentsFrame();
+ JavaScriptFrame* frame = it.frame();
+
+ int argument_count = frame->GetArgumentsLength();
+ Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
+
+ return *NewRestParam(isolate, parameters, argument_count, rest_index);
+}
+
+
RUNTIME_FUNCTION(Runtime_NewClosureFromStubFailure) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -748,13 +801,6 @@ RUNTIME_FUNCTION(Runtime_DeclareModules) {
USE(result);
break;
}
- case MODULE: {
- Object* referenced_context = Context::cast(host_context)->get(index);
- Handle<JSModule> value(Context::cast(referenced_context)->module());
- JSObject::SetOwnPropertyIgnoreAttributes(module, name, value, FROZEN)
- .Assert();
- break;
- }
case INTERNAL:
case TEMPORARY:
case DYNAMIC:
@@ -946,7 +992,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 3);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
int index;
PropertyAttributes attributes;
@@ -961,7 +1007,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
if (index >= 0) {
if ((attributes & READ_ONLY) == 0) {
Handle<Context>::cast(holder)->set(index, *value);
- } else if (strict_mode == STRICT) {
+ } else if (is_strict(language_mode)) {
// Setting read only property in strict mode.
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
@@ -977,7 +1023,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
if (attributes != ABSENT) {
// The property exists on the holder.
object = Handle<JSReceiver>::cast(holder);
- } else if (strict_mode == STRICT) {
+ } else if (is_strict(language_mode)) {
// If absent in strict mode: throw.
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError("not_defined", HandleVector(&name, 1)));
@@ -987,7 +1033,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
}
RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetProperty(object, name, value, strict_mode));
+ isolate, Object::SetProperty(object, name, value, language_mode));
return *value;
}
@@ -1054,7 +1100,7 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
}
if (String::Equals(isolate->factory()->callee_string(), key)) {
JSFunction* function = frame->function();
- if (function->shared()->strict_mode() == STRICT) {
+ if (is_strict(function->shared()->language_mode())) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError("strict_arguments_callee",
HandleVector<Object>(NULL, 0)));
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index b4b90e2c58..89e1f2a696 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -53,9 +53,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // The following two assertions are lifted from the DCHECKs inside
+ // The following assertion was lifted from the DCHECK inside
// JSFunction::MarkForOptimization().
- RUNTIME_ASSERT(!function->shared()->is_generator());
RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
(function->code()->kind() == Code::FUNCTION &&
function->code()->optimizable()));
@@ -65,23 +64,13 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
- // If the function cannot optimized, just return.
- if (function->shared()->optimization_disabled()) {
- return isolate->heap()->undefined_value();
- }
-
function->MarkForOptimization();
Code* unoptimized = function->shared()->code();
if (args.length() == 2 && unoptimized->kind() == Code::FUNCTION) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("osr")) && FLAG_use_osr) {
- // Start patching from the currently patched loop nesting level.
- DCHECK(BackEdgeTable::Verify(isolate, unoptimized));
- isolate->runtime_profiler()->AttemptOnStackReplacement(
- *function, Code::kMaxLoopNestingMarker);
- } else if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
- isolate->concurrent_recompilation_enabled()) {
+ if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
+ isolate->concurrent_recompilation_enabled()) {
function->AttemptConcurrentOptimization();
}
}
@@ -90,6 +79,45 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
+RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 0);
+ Handle<JSFunction> function = Handle<JSFunction>::null();
+
+ {
+ // Find the JavaScript function on the top of the stack.
+ JavaScriptFrameIterator it(isolate);
+ while (!it.done()) {
+ if (it.frame()->is_java_script()) {
+ function = Handle<JSFunction>(it.frame()->function());
+ break;
+ }
+ }
+ if (function.is_null()) return isolate->heap()->undefined_value();
+ }
+
+ // The following assertion was lifted from the DCHECK inside
+ // JSFunction::MarkForOptimization().
+ RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
+ (function->code()->kind() == Code::FUNCTION &&
+ function->code()->optimizable()));
+
+ if (!isolate->use_crankshaft()) return isolate->heap()->undefined_value();
+
+ // If the function is already optimized, just return.
+ if (function->IsOptimized()) return isolate->heap()->undefined_value();
+
+ Code* unoptimized = function->shared()->code();
+ if (unoptimized->kind() == Code::FUNCTION) {
+ DCHECK(BackEdgeTable::Verify(isolate, unoptimized));
+ isolate->runtime_profiler()->AttemptOnStackReplacement(
+ *function, Code::kMaxLoopNestingMarker);
+ }
+
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index cd2c0eb9fa..82224bc9b3 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -63,6 +63,8 @@ bool Runtime::SetupArrayBufferAllocatingData(Isolate* isolate,
bool initialize) {
void* data;
CHECK(V8::ArrayBufferAllocator() != NULL);
+ // Prevent creating array buffers when serializing.
+ DCHECK(!isolate->serializer_enabled());
if (allocated_length != 0) {
if (initialize) {
data = V8::ArrayBufferAllocator()->Allocate(allocated_length);
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 95d75f5084..c44e40208f 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -70,13 +70,12 @@ namespace internal {
PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-// Assert that the given argument has a valid value for a StrictMode
-// and store it in a StrictMode variable with the given name.
-#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT(args.smi_at(index) == STRICT || \
- args.smi_at(index) == SLOPPY); \
- StrictMode name = static_cast<StrictMode>(args.smi_at(index));
+// Assert that the given argument has a valid value for a LanguageMode
+// and store it in a LanguageMode variable with the given name.
+#define CONVERT_LANGUAGE_MODE_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsSmi()); \
+ RUNTIME_ASSERT(is_valid_language_mode(args.smi_at(index))); \
+ LanguageMode name = static_cast<LanguageMode>(args.smi_at(index));
// Assert that the given argument is a number within the Int32 range
@@ -88,6 +87,16 @@ namespace internal {
RUNTIME_ASSERT(args[index]->ToInt32(&name));
+// Cast the given argument to PropertyAttributes and store its value in a
+// variable with the given name. If the argument is not a Smi call or the
+// enum value is out of range, call IllegalOperation and return.
+#define CONVERT_PROPERTY_ATTRIBUTES_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsSmi()); \
+ RUNTIME_ASSERT( \
+ (args.smi_at(index) & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); \
+ PropertyAttributes name = static_cast<PropertyAttributes>(args.smi_at(index));
+
+
// A mechanism to return a pair of Object pointers in registers (if possible).
// How this is achieved is calling convention-dependent.
// All currently supported x86 compiles uses calling conventions that are cdecl
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 459ca50447..d01c141424 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -22,8 +22,6 @@ namespace internal {
// Reference implementation for inlined runtime functions. Only used when the
// compiler does not support a certain intrinsic. Don't optimize these, but
// implement the intrinsic in the respective compiler instead.
-// TODO(mstarzinger): These are place-holder stubs for TurboFan and will
-// eventually all have a C++ implementation and this macro will be gone.
#define I(name, number_of_args, result_size) \
Object* RuntimeReference_##name(int args_length, Object** args_object, \
Isolate* isolate);
@@ -80,7 +78,7 @@ void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
if (name == NULL) continue;
Handle<NameDictionary> new_dict = NameDictionary::Add(
dict, isolate->factory()->InternalizeUtf8String(name),
- Handle<Smi>(Smi::FromInt(i), isolate), PropertyDetails(NONE, FIELD, 0));
+ Handle<Smi>(Smi::FromInt(i), isolate), PropertyDetails(NONE, DATA, 0));
// The dictionary does not need to grow.
CHECK(new_dict.is_identical_to(dict));
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 9e6c495162..d277aee166 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -66,6 +66,7 @@ namespace internal {
F(RunningInSimulator, 0, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(OptimizeOsr, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
@@ -73,6 +74,7 @@ namespace internal {
F(CompileForOnStackReplacement, 1, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(SetNativeFlag, 1, 1) \
+ F(IsConstructor, 1, 1) \
F(SetInlineBuiltinFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
@@ -96,8 +98,6 @@ namespace internal {
F(ToBool, 1, 1) \
F(Typeof, 1, 1) \
\
- F(Booleanize, 2, 1) /* TODO(turbofan): Only temporary */ \
- \
F(StringToNumber, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
@@ -151,7 +151,7 @@ namespace internal {
F(MathExpRT, 1, 1) \
F(RoundNumber, 1, 1) \
F(MathFround, 1, 1) \
- F(RemPiO2, 1, 1) \
+ F(RemPiO2, 2, 1) \
\
/* Regular expressions */ \
F(RegExpInitializeAndCompile, 3, 1) \
@@ -187,18 +187,18 @@ namespace internal {
F(HomeObjectSymbol, 0, 1) \
F(DefineClass, 6, 1) \
F(DefineClassMethod, 3, 1) \
- F(DefineClassGetter, 3, 1) \
- F(DefineClassSetter, 3, 1) \
F(ClassGetSourceCode, 1, 1) \
- F(ThrowNonMethodError, 0, 1) \
- F(ThrowUnsupportedSuperError, 0, 1) \
F(LoadFromSuper, 3, 1) \
F(LoadKeyedFromSuper, 3, 1) \
+ F(ThrowConstructorNonCallableError, 0, 1) \
+ F(ThrowArrayNotSubclassableError, 0, 1) \
+ F(ThrowNonMethodError, 0, 1) \
+ F(ThrowUnsupportedSuperError, 0, 1) \
+ F(HandleStepInForDerivedConstructors, 1, 1) \
F(StoreToSuper_Strict, 4, 1) \
F(StoreToSuper_Sloppy, 4, 1) \
F(StoreKeyedToSuper_Strict, 4, 1) \
- F(StoreKeyedToSuper_Sloppy, 4, 1) \
- F(DefaultConstructorSuperCall, 0, 1)
+ F(StoreKeyedToSuper_Sloppy, 4, 1)
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
@@ -233,9 +233,6 @@ namespace internal {
\
F(SetCode, 2, 1) \
\
- F(CreateApiFunction, 2, 1) \
- F(IsTemplate, 1, 1) \
- F(GetTemplateField, 2, 1) \
F(DisableAccessChecks, 1, 1) \
F(EnableAccessChecks, 1, 1) \
\
@@ -255,13 +252,13 @@ namespace internal {
F(GlobalProxy, 1, 1) \
\
F(AddNamedProperty, 4, 1) \
- F(AddPropertyForTemplate, 4, 1) \
F(SetProperty, 4, 1) \
F(AddElement, 4, 1) \
- F(DefineApiAccessorProperty, 5, 1) \
F(DefineDataPropertyUnchecked, 4, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
F(GetDataProperty, 2, 1) \
+ F(DefineGetterPropertyUnchecked, 4, 1) \
+ F(DefineSetterPropertyUnchecked, 4, 1) \
\
/* Arrays */ \
F(RemoveArrayHoles, 2, 1) \
@@ -382,6 +379,7 @@ namespace internal {
F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
F(NativeScriptsCount, 0, 1) \
+ F(RenderCallSite, 0, 1) \
/* ES5 */ \
F(OwnKeys, 1, 1) \
\
@@ -455,6 +453,8 @@ namespace internal {
F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \
F(NewSloppyArguments, 3, 1) \
F(NewStrictArguments, 3, 1) \
+ F(NewRestParam, 3, 1) \
+ F(NewRestParamSlow, 1, 1) \
\
/* Harmony generators */ \
F(CreateJSGeneratorObject, 0, 1) \
@@ -464,6 +464,7 @@ namespace internal {
\
/* Arrays */ \
F(ArrayConstructor, -1, 1) \
+ F(ArrayConstructorWithSubclassing, -1, 1) \
F(InternalArrayConstructor, -1, 1) \
\
/* Literals */ \
@@ -475,8 +476,8 @@ namespace internal {
/* Statements */ \
F(NewClosure, 3, 1) \
F(NewClosureFromStubFailure, 1, 1) \
- F(NewObject, 1, 1) \
- F(NewObjectWithAllocationSite, 2, 1) \
+ F(NewObject, 2, 1) \
+ F(NewObjectWithAllocationSite, 3, 1) \
F(FinalizeInstanceSize, 1, 1) \
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
@@ -655,6 +656,7 @@ namespace internal {
F(IsJSProxy, 1, 1) \
F(IsConstructCall, 0, 1) \
F(CallFunction, -1 /* receiver + n args + function */, 1) \
+ F(DefaultConstructorCallSuper, 0, 1) \
F(ArgumentsLength, 0, 1) \
F(Arguments, 1, 1) \
F(ValueOf, 1, 1) \
@@ -821,7 +823,7 @@ class Runtime : public AllStatic {
MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
- Handle<Object> value, StrictMode strict_mode);
+ Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> DefineObjectProperty(
Handle<JSObject> object, Handle<Object> key, Handle<Object> value,
@@ -877,6 +879,13 @@ class Runtime : public AllStatic {
MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
Handle<FixedArray> elements);
+
+ static void WeakCollectionInitialize(
+ Isolate* isolate, Handle<JSWeakCollection> weak_collection);
+ static void WeakCollectionSet(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, Handle<Object> value);
+ static bool WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key);
};
@@ -890,7 +899,8 @@ class AllocateTargetSpace : public BitField<AllocationSpace, 1, 3> {};
class DeclareGlobalsEvalFlag : public BitField<bool, 0, 1> {};
class DeclareGlobalsNativeFlag : public BitField<bool, 1, 1> {};
-class DeclareGlobalsStrictMode : public BitField<StrictMode, 2, 1> {};
+STATIC_ASSERT(LANGUAGE_END == 3);
+class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 2> {};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index aaa32b9e14..a0ae5b2ced 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -10,7 +10,6 @@
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
-#include "src/zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 760df80708..2c6cac6693 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -13,7 +13,7 @@
#include <signal.h>
#include <sys/time.h>
-#if !V8_OS_QNX && !V8_OS_NACL
+#if !V8_OS_QNX && !V8_OS_NACL && !V8_OS_AIX
#include <sys/syscall.h> // NOLINT
#endif
@@ -256,6 +256,12 @@ class SimulatorHelper {
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
+#elif V8_TARGET_ARCH_PPC
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp =
+ reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
+ state->fp =
+ reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
#endif
}
@@ -361,7 +367,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !V8_OS_OPENBSD
+#if !(V8_OS_OPENBSD || (V8_OS_LINUX && V8_HOST_ARCH_PPC))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
@@ -398,6 +404,10 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_PPC
+ state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
+ state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
+ state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
@@ -469,7 +479,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
#endif // V8_HOST_ARCH_*
-#endif // V8_OS_QNX
+#elif V8_OS_AIX
+ state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar);
+ state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]);
+ state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]);
+#endif // V8_OS_AIX
#endif // USE_SIMULATOR
sampler->SampleStack(state);
}
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index 50c3955c1b..cc4a18b540 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -15,9 +15,9 @@ namespace internal {
namespace {
-unsigned CopyCharsHelper(uint16_t* dest, unsigned length, const uint8_t* src,
- unsigned* src_pos, unsigned src_length,
- ScriptCompiler::StreamedSource::Encoding encoding) {
+size_t CopyCharsHelper(uint16_t* dest, size_t length, const uint8_t* src,
+ size_t* src_pos, size_t src_length,
+ ScriptCompiler::StreamedSource::Encoding encoding) {
// It's possible that this will be called with length 0, but don't assume that
// the functions this calls handle it gracefully.
if (length == 0) return 0;
@@ -27,7 +27,7 @@ unsigned CopyCharsHelper(uint16_t* dest, unsigned length, const uint8_t* src,
dest, length, src, src_pos, src_length);
}
- unsigned to_fill = length;
+ size_t to_fill = length;
if (to_fill > src_length - *src_pos) to_fill = src_length - *src_pos;
if (encoding == ScriptCompiler::StreamedSource::ONE_BYTE) {
@@ -110,13 +110,13 @@ bool BufferedUtf16CharacterStream::ReadBlock() {
if (buffer_cursor_ < buffer_end_) return true;
// Otherwise read a new block.
}
- unsigned length = FillBuffer(pos_);
+ size_t length = FillBuffer(pos_);
buffer_end_ = buffer_ + length;
return length > 0;
}
-unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
+size_t BufferedUtf16CharacterStream::SlowSeekForward(size_t delta) {
// Leave pushback mode (i.e., ignore that there might be valid data
// in the buffer before the pushback_limit_ point).
pushback_limit_ = NULL;
@@ -129,11 +129,8 @@ unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
- Handle<String> data,
- unsigned start_position,
- unsigned end_position)
- : string_(data),
- length_(end_position) {
+ Handle<String> data, size_t start_position, size_t end_position)
+ : string_(data), length_(end_position) {
DCHECK(end_position >= start_position);
pos_ = start_position;
}
@@ -142,21 +139,22 @@ GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
GenericStringUtf16CharacterStream::~GenericStringUtf16CharacterStream() { }
-unsigned GenericStringUtf16CharacterStream::BufferSeekForward(unsigned delta) {
- unsigned old_pos = pos_;
+size_t GenericStringUtf16CharacterStream::BufferSeekForward(size_t delta) {
+ size_t old_pos = pos_;
pos_ = Min(pos_ + delta, length_);
ReadBlock();
return pos_ - old_pos;
}
-unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos) {
+size_t GenericStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
if (from_pos >= length_) return 0;
- unsigned length = kBufferSize;
+ size_t length = kBufferSize;
if (from_pos + length > length_) {
length = length_ - from_pos;
}
- String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
+ String::WriteToFlat<uc16>(*string_, buffer_, static_cast<int>(from_pos),
+ static_cast<int>(from_pos + length));
return length;
}
@@ -164,7 +162,7 @@ unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos) {
// ----------------------------------------------------------------------------
// Utf8ToUtf16CharacterStream
Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
- unsigned length)
+ size_t length)
: BufferedUtf16CharacterStream(),
raw_data_(data),
raw_data_length_(length),
@@ -177,12 +175,11 @@ Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
-unsigned Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, unsigned length,
- const byte* src,
- unsigned* src_pos,
- unsigned src_length) {
+size_t Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, size_t length,
+ const byte* src, size_t* src_pos,
+ size_t src_length) {
static const unibrow::uchar kMaxUtf16Character = 0xffff;
- unsigned i = 0;
+ size_t i = 0;
// Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
// one character early (in the normal case), because we need to have at least
// two free spaces in the buffer to be sure that the next character will fit.
@@ -206,9 +203,9 @@ unsigned Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, unsigned length,
}
-unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
- unsigned old_pos = pos_;
- unsigned target_pos = pos_ + delta;
+size_t Utf8ToUtf16CharacterStream::BufferSeekForward(size_t delta) {
+ size_t old_pos = pos_;
+ size_t target_pos = pos_ + delta;
SetRawPosition(target_pos);
pos_ = raw_character_position_;
ReadBlock();
@@ -216,15 +213,15 @@ unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
}
-unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position) {
+size_t Utf8ToUtf16CharacterStream::FillBuffer(size_t char_position) {
SetRawPosition(char_position);
if (raw_character_position_ != char_position) {
// char_position was not a valid position in the stream (hit the end
// while spooling to it).
return 0u;
}
- unsigned i = CopyChars(buffer_, kBufferSize, raw_data_, &raw_data_pos_,
- raw_data_length_);
+ size_t i = CopyChars(buffer_, kBufferSize, raw_data_, &raw_data_pos_,
+ raw_data_length_);
raw_character_position_ = char_position + i;
return i;
}
@@ -249,7 +246,7 @@ static bool IsUtf8MultiCharacterFollower(byte later_byte) {
// Move the cursor back to point at the preceding UTF-8 character start
// in the buffer.
-static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
+static inline void Utf8CharacterBack(const byte* buffer, size_t* cursor) {
byte character = buffer[--*cursor];
if (character > unibrow::Utf8::kMaxOneByteChar) {
DCHECK(IsUtf8MultiCharacterFollower(character));
@@ -264,7 +261,7 @@ static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
// Move the cursor forward to point at the next following UTF-8 character start
// in the buffer.
-static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
+static inline void Utf8CharacterForward(const byte* buffer, size_t* cursor) {
byte character = buffer[(*cursor)++];
if (character > unibrow::Utf8::kMaxOneByteChar) {
// First character of a multi-byte character encoding.
@@ -279,7 +276,7 @@ static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
// 2 if value in range 0xE0 .. 0xEF.
// 3 if value in range 0xF0 .. 0xF7.
// Encode that in a single value.
- unsigned additional_bytes =
+ size_t additional_bytes =
((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
*cursor += additional_bytes;
DCHECK(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
@@ -291,11 +288,11 @@ static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
// is no position in the UTF8 stream that corresponds to that. This assumes
// that the surrogate pair is correctly coded as a 4 byte UTF-8 sequence. If
// it is illegally coded as two 3 byte sequences then there is no problem here.
-void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
+void Utf8ToUtf16CharacterStream::SetRawPosition(size_t target_position) {
if (raw_character_position_ > target_position) {
// Spool backwards in utf8 buffer.
do {
- int old_pos = raw_data_pos_;
+ size_t old_pos = raw_data_pos_;
Utf8CharacterBack(raw_data_, &raw_data_pos_);
raw_character_position_--;
DCHECK(old_pos - raw_data_pos_ <= 4);
@@ -309,7 +306,7 @@ void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
// Spool forwards in the utf8 buffer.
while (raw_character_position_ < target_position) {
if (raw_data_pos_ == raw_data_length_) return;
- int old_pos = raw_data_pos_;
+ size_t old_pos = raw_data_pos_;
Utf8CharacterForward(raw_data_, &raw_data_pos_);
raw_character_position_++;
DCHECK(raw_data_pos_ - old_pos <= 4);
@@ -320,10 +317,10 @@ void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
}
-unsigned ExternalStreamingStream::FillBuffer(unsigned position) {
+size_t ExternalStreamingStream::FillBuffer(size_t position) {
// Ignore "position" which is the position in the decoded data. Instead,
// ExternalStreamingStream keeps track of the position in the raw data.
- unsigned data_in_buffer = 0;
+ size_t data_in_buffer = 0;
// Note that the UTF-8 decoder might not be able to fill the buffer
// completely; it will typically leave the last character empty (see
// Utf8ToUtf16CharacterStream::CopyChars).
@@ -331,10 +328,8 @@ unsigned ExternalStreamingStream::FillBuffer(unsigned position) {
if (current_data_ == NULL) {
// GetSomeData will wait until the embedder has enough data. Here's an
// interface between the API which uses size_t (which is the correct type
- // here) and the internal parts which use unsigned. TODO(marja): make the
- // internal parts use size_t too.
- current_data_length_ =
- static_cast<unsigned>(source_stream_->GetMoreData(&current_data_));
+ // here) and the internal parts which use size_t.
+ current_data_length_ = source_stream_->GetMoreData(&current_data_);
current_data_offset_ = 0;
bool data_ends = current_data_length_ == 0;
@@ -363,8 +358,8 @@ unsigned ExternalStreamingStream::FillBuffer(unsigned position) {
}
// Fill the buffer from current_data_.
- unsigned new_offset = 0;
- unsigned new_chars_in_buffer =
+ size_t new_offset = 0;
+ size_t new_chars_in_buffer =
CopyCharsHelper(buffer_ + data_in_buffer, kBufferSize - data_in_buffer,
current_data_ + current_data_offset_, &new_offset,
current_data_length_ - current_data_offset_, encoding_);
@@ -384,7 +379,7 @@ unsigned ExternalStreamingStream::FillBuffer(unsigned position) {
}
void ExternalStreamingStream::HandleUtf8SplitCharacters(
- unsigned* data_in_buffer) {
+ size_t* data_in_buffer) {
// Note the following property of UTF-8 which makes this function possible:
// Given any byte, we can always read its local environment (in both
// directions) to find out the (possibly multi-byte) character it belongs
@@ -407,8 +402,8 @@ void ExternalStreamingStream::HandleUtf8SplitCharacters(
}
// Convert the data in utf8_split_char_buffer_.
- unsigned new_offset = 0;
- unsigned new_chars_in_buffer =
+ size_t new_offset = 0;
+ size_t new_chars_in_buffer =
CopyCharsHelper(buffer_ + *data_in_buffer,
kBufferSize - *data_in_buffer, utf8_split_char_buffer_,
&new_offset, utf8_split_char_buffer_length_, encoding_);
@@ -439,7 +434,7 @@ void ExternalStreamingStream::HandleUtf8SplitCharacters(
}
}
CHECK(utf8_split_char_buffer_length_ <= 4);
- for (unsigned i = 0; i < utf8_split_char_buffer_length_; ++i) {
+ for (size_t i = 0; i < utf8_split_char_buffer_length_; ++i) {
utf8_split_char_buffer_[i] = current_data_[current_data_length_ + i];
}
}
diff --git a/deps/v8/src/scanner-character-streams.h b/deps/v8/src/scanner-character-streams.h
index 3c1cccc480..a391a659a4 100644
--- a/deps/v8/src/scanner-character-streams.h
+++ b/deps/v8/src/scanner-character-streams.h
@@ -21,15 +21,15 @@ class BufferedUtf16CharacterStream: public Utf16CharacterStream {
virtual void PushBack(uc32 character);
protected:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackStepSize = 16;
+ static const size_t kBufferSize = 512;
+ static const size_t kPushBackStepSize = 16;
- virtual unsigned SlowSeekForward(unsigned delta);
+ virtual size_t SlowSeekForward(size_t delta);
virtual bool ReadBlock();
virtual void SlowPushBack(uc16 character);
- virtual unsigned BufferSeekForward(unsigned delta) = 0;
- virtual unsigned FillBuffer(unsigned position) = 0;
+ virtual size_t BufferSeekForward(size_t delta) = 0;
+ virtual size_t FillBuffer(size_t position) = 0;
const uc16* pushback_limit_;
uc16 buffer_[kBufferSize];
@@ -39,40 +39,39 @@ class BufferedUtf16CharacterStream: public Utf16CharacterStream {
// Generic string stream.
class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
- GenericStringUtf16CharacterStream(Handle<String> data,
- unsigned start_position,
- unsigned end_position);
+ GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
+ size_t end_position);
virtual ~GenericStringUtf16CharacterStream();
protected:
- virtual unsigned BufferSeekForward(unsigned delta);
- virtual unsigned FillBuffer(unsigned position);
+ virtual size_t BufferSeekForward(size_t delta);
+ virtual size_t FillBuffer(size_t position);
Handle<String> string_;
- unsigned length_;
+ size_t length_;
};
// Utf16 stream based on a literal UTF-8 string.
class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
- Utf8ToUtf16CharacterStream(const byte* data, unsigned length);
+ Utf8ToUtf16CharacterStream(const byte* data, size_t length);
virtual ~Utf8ToUtf16CharacterStream();
- static unsigned CopyChars(uint16_t* dest, unsigned length, const byte* src,
- unsigned* src_pos, unsigned src_length);
+ static size_t CopyChars(uint16_t* dest, size_t length, const byte* src,
+ size_t* src_pos, size_t src_length);
protected:
- virtual unsigned BufferSeekForward(unsigned delta);
- virtual unsigned FillBuffer(unsigned char_position);
- void SetRawPosition(unsigned char_position);
+ virtual size_t BufferSeekForward(size_t delta);
+ virtual size_t FillBuffer(size_t char_position);
+ void SetRawPosition(size_t char_position);
const byte* raw_data_;
- unsigned raw_data_length_; // Measured in bytes, not characters.
- unsigned raw_data_pos_;
+ size_t raw_data_length_; // Measured in bytes, not characters.
+ size_t raw_data_pos_;
// The character position of the character at raw_data[raw_data_pos_].
// Not necessarily the same as pos_.
- unsigned raw_character_position_;
+ size_t raw_character_position_;
};
@@ -91,7 +90,7 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
virtual ~ExternalStreamingStream() { delete[] current_data_; }
- unsigned BufferSeekForward(unsigned delta) OVERRIDE {
+ size_t BufferSeekForward(size_t delta) OVERRIDE {
// We never need to seek forward when streaming scripts. We only seek
// forward when we want to parse a function whose location we already know,
// and when streaming, we don't know the locations of anything we haven't
@@ -100,19 +99,19 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
return 0;
}
- unsigned FillBuffer(unsigned position) OVERRIDE;
+ size_t FillBuffer(size_t position) OVERRIDE;
private:
- void HandleUtf8SplitCharacters(unsigned* data_in_buffer);
+ void HandleUtf8SplitCharacters(size_t* data_in_buffer);
ScriptCompiler::ExternalSourceStream* source_stream_;
v8::ScriptCompiler::StreamedSource::Encoding encoding_;
const uint8_t* current_data_;
- unsigned current_data_offset_;
- unsigned current_data_length_;
+ size_t current_data_offset_;
+ size_t current_data_length_;
// For converting UTF-8 characters which are split across two data chunks.
uint8_t utf8_split_char_buffer_[4];
- unsigned utf8_split_char_buffer_length_;
+ size_t utf8_split_char_buffer_length_;
};
@@ -131,7 +130,7 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
}
protected:
- virtual unsigned SlowSeekForward(unsigned delta) {
+ virtual size_t SlowSeekForward(size_t delta) {
// Fast case always handles seeking.
return 0;
}
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index d499e9b996..de1b8e8b72 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -599,6 +599,15 @@ void Scanner::Scan() {
token = ScanNumber(true);
} else {
token = Token::PERIOD;
+ if (c0_ == '.') {
+ Advance();
+ if (c0_ == '.') {
+ Advance();
+ token = Token::ELLIPSIS;
+ } else {
+ PushBack('.');
+ }
+ }
}
break;
@@ -834,7 +843,7 @@ Token::Value Scanner::ScanTemplateSpan() {
ReduceRawLiteralLength(2);
break;
} else if (c == '\\') {
- if (unicode_cache_->IsLineTerminator(c0_)) {
+ if (c0_ > 0 && unicode_cache_->IsLineTerminator(c0_)) {
// The TV of LineContinuation :: \ LineTerminatorSequence is the empty
// code unit sequence.
uc32 lastChar = c0_;
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 6e668fd492..86a0098f86 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -67,15 +67,14 @@ class Utf16CharacterStream {
// Return the current position in the code unit stream.
// Starts at zero.
- inline unsigned pos() const { return pos_; }
+ inline size_t pos() const { return pos_; }
// Skips forward past the next code_unit_count UTF-16 code units
// in the input, or until the end of input if that comes sooner.
// Returns the number of code units actually skipped. If less
// than code_unit_count,
- inline unsigned SeekForward(unsigned code_unit_count) {
- unsigned buffered_chars =
- static_cast<unsigned>(buffer_end_ - buffer_cursor_);
+ inline size_t SeekForward(size_t code_unit_count) {
+ size_t buffered_chars = buffer_end_ - buffer_cursor_;
if (code_unit_count <= buffered_chars) {
buffer_cursor_ += code_unit_count;
pos_ += code_unit_count;
@@ -98,11 +97,11 @@ class Utf16CharacterStream {
// is at or after the end of the input, return false. If there
// are more code_units available, return true.
virtual bool ReadBlock() = 0;
- virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
+ virtual size_t SlowSeekForward(size_t code_unit_count) = 0;
const uint16_t* buffer_cursor_;
const uint16_t* buffer_end_;
- unsigned pos_;
+ size_t pos_;
};
@@ -121,6 +120,12 @@ class UnicodeCache {
bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
+ bool IsLineTerminatorSequence(unibrow::uchar c, unibrow::uchar next) {
+ if (!IsLineTerminator(c)) return false;
+ if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
+ return true;
+ }
+
bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
return kIsWhiteSpaceOrLineTerminator.get(c);
@@ -691,7 +696,7 @@ class Scanner {
// Return the current source position.
int source_pos() {
- return source_->pos() - kCharacterLookaheadBufferSize;
+ return static_cast<int>(source_->pos()) - kCharacterLookaheadBufferSize;
}
UnicodeCache* unicode_cache_;
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index b9cb6f3ba5..74aefdb954 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -13,7 +13,8 @@ namespace v8 {
namespace internal {
-Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
+Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
+ Scope* scope) {
// Collect stack and context locals.
ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
@@ -24,6 +25,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
DCHECK(scope->StackLocalCount() == stack_local_count);
DCHECK(scope->ContextLocalCount() == context_local_count);
+ bool simple_parameter_list =
+ scope->is_function_scope() ? scope->is_simple_parameter_list() : true;
+
// Determine use and location of the function variable if it is present.
FunctionVariableInfo function_name_info;
VariableMode function_variable_mode;
@@ -49,17 +53,18 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
+ parameter_count + stack_local_count + 2 * context_local_count
+ (has_function_name ? 2 : 0);
- Factory* factory = zone->isolate()->factory();
+ Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
// Encode the flags.
int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
- StrictModeField::encode(scope->strict_mode()) |
+ LanguageModeField::encode(scope->language_mode()) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
AsmModuleField::encode(scope->asm_module()) |
- AsmFunctionField::encode(scope->asm_function());
+ AsmFunctionField::encode(scope->asm_function()) |
+ IsSimpleParameterListField::encode(simple_parameter_list);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetStackLocalCount(stack_local_count);
@@ -145,8 +150,8 @@ bool ScopeInfo::CallsEval() {
}
-StrictMode ScopeInfo::strict_mode() {
- return length() > 0 ? StrictModeField::decode(Flags()) : SLOPPY;
+LanguageMode ScopeInfo::language_mode() {
+ return length() > 0 ? LanguageModeField::decode(Flags()) : SLOPPY;
}
@@ -547,25 +552,19 @@ void ScopeInfo::Print() {
//---------------------------------------------------------------------------
// ModuleInfo.
-Handle<ModuleInfo> ModuleInfo::Create(
- Isolate* isolate, Interface* interface, Scope* scope) {
- Handle<ModuleInfo> info = Allocate(isolate, interface->Length());
- info->set_host_index(interface->Index());
+Handle<ModuleInfo> ModuleInfo::Create(Isolate* isolate,
+ ModuleDescriptor* descriptor,
+ Scope* scope) {
+ Handle<ModuleInfo> info = Allocate(isolate, descriptor->Length());
+ info->set_host_index(descriptor->Index());
int i = 0;
- for (Interface::Iterator it = interface->iterator();
- !it.done(); it.Advance(), ++i) {
+ for (ModuleDescriptor::Iterator it = descriptor->iterator(); !it.done();
+ it.Advance(), ++i) {
Variable* var = scope->LookupLocal(it.name());
info->set_name(i, *(it.name()->string()));
info->set_mode(i, var->mode());
- DCHECK((var->mode() == MODULE) == (it.interface()->IsModule()));
- if (var->mode() == MODULE) {
- DCHECK(it.interface()->IsFrozen());
- DCHECK(it.interface()->Index() >= 0);
- info->set_index(i, it.interface()->Index());
- } else {
- DCHECK(var->index() >= 0);
- info->set_index(i, var->index());
- }
+ DCHECK(var->index() >= 0);
+ info->set_index(i, var->index());
}
DCHECK(i == info->length());
return info;
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 1d9f06fde8..70a17cd7d4 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -6,8 +6,8 @@
#define V8_SCOPEINFO_H_
#include "src/allocation.h"
+#include "src/modules.h"
#include "src/variables.h"
-#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -120,8 +120,8 @@ class ModuleInfo: public FixedArray {
return static_cast<ModuleInfo*>(FixedArray::cast(description));
}
- static Handle<ModuleInfo> Create(
- Isolate* isolate, Interface* interface, Scope* scope);
+ static Handle<ModuleInfo> Create(Isolate* isolate,
+ ModuleDescriptor* descriptor, Scope* scope);
// Index of module's context in host context.
int host_index() { return Smi::cast(get(HOST_OFFSET))->value(); }
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 39b67a8864..35449643ce 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -34,8 +34,7 @@ Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
VariableMode mode, bool is_valid_lhs,
Variable::Kind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag,
- Interface* interface) {
+ MaybeAssignedFlag maybe_assigned_flag) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
// FIXME(marja): fix the type of Lookup.
@@ -44,9 +43,8 @@ Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
DCHECK(p->key == name);
- p->value = new (zone())
- Variable(scope, name, mode, is_valid_lhs, kind, initialization_flag,
- maybe_assigned_flag, interface);
+ p->value = new (zone()) Variable(scope, name, mode, is_valid_lhs, kind,
+ initialization_flag, maybe_assigned_flag);
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -67,19 +65,17 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
// ----------------------------------------------------------------------------
// Implementation of Scope
-Scope::Scope(Scope* outer_scope, ScopeType scope_type,
- AstValueFactory* ast_value_factory, Zone* zone)
- : isolate_(zone->isolate()),
- inner_scopes_(4, zone),
+Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
+ AstValueFactory* ast_value_factory)
+ : inner_scopes_(4, zone),
variables_(zone),
internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
decls_(4, zone),
- interface_(FLAG_harmony_modules &&
- (scope_type == MODULE_SCOPE || scope_type == SCRIPT_SCOPE)
- ? Interface::NewModule(zone) : NULL),
+ module_descriptor_(
+ scope_type == MODULE_SCOPE ? ModuleDescriptor::New(zone) : NULL),
already_resolved_(false),
ast_value_factory_(ast_value_factory),
zone_(zone) {
@@ -90,20 +86,16 @@ Scope::Scope(Scope* outer_scope, ScopeType scope_type,
}
-Scope::Scope(Scope* inner_scope,
- ScopeType scope_type,
- Handle<ScopeInfo> scope_info,
- AstValueFactory* value_factory,
- Zone* zone)
- : isolate_(zone->isolate()),
- inner_scopes_(4, zone),
+Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
+ Handle<ScopeInfo> scope_info, AstValueFactory* value_factory)
+ : inner_scopes_(4, zone),
variables_(zone),
internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
decls_(4, zone),
- interface_(NULL),
+ module_descriptor_(NULL),
already_resolved_(true),
ast_value_factory_(value_factory),
zone_(zone) {
@@ -118,17 +110,17 @@ Scope::Scope(Scope* inner_scope,
}
-Scope::Scope(Scope* inner_scope, const AstRawString* catch_variable_name,
- AstValueFactory* value_factory, Zone* zone)
- : isolate_(zone->isolate()),
- inner_scopes_(1, zone),
+Scope::Scope(Zone* zone, Scope* inner_scope,
+ const AstRawString* catch_variable_name,
+ AstValueFactory* value_factory)
+ : inner_scopes_(1, zone),
variables_(zone),
internals_(0, zone),
temps_(0, zone),
params_(0, zone),
unresolved_(0, zone),
decls_(0, zone),
- interface_(NULL),
+ module_descriptor_(NULL),
already_resolved_(true),
ast_value_factory_(value_factory),
zone_(zone) {
@@ -154,6 +146,7 @@ void Scope::SetDefaults(ScopeType scope_type,
scope_name_ = ast_value_factory_->empty_string();
dynamics_ = NULL;
receiver_ = NULL;
+ new_target_ = nullptr;
function_ = NULL;
arguments_ = NULL;
illegal_redecl_ = NULL;
@@ -162,18 +155,16 @@ void Scope::SetDefaults(ScopeType scope_type,
scope_calls_eval_ = false;
scope_uses_arguments_ = false;
scope_uses_super_property_ = false;
- scope_uses_super_constructor_call_ = false;
scope_uses_this_ = false;
asm_module_ = false;
asm_function_ = outer_scope != NULL && outer_scope->asm_module_;
- // Inherit the strict mode from the parent scope.
- strict_mode_ = outer_scope != NULL ? outer_scope->strict_mode_ : SLOPPY;
+ // Inherit the language mode from the parent scope.
+ language_mode_ = outer_scope != NULL ? outer_scope->language_mode_ : SLOPPY;
outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
inner_scope_uses_arguments_ = false;
inner_scope_uses_this_ = false;
inner_scope_uses_super_property_ = false;
- inner_scope_uses_super_constructor_call_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
? outer_scope->has_forced_context_allocation() : false;
@@ -182,29 +173,29 @@ void Scope::SetDefaults(ScopeType scope_type,
num_heap_slots_ = 0;
num_modules_ = 0;
module_var_ = NULL,
+ rest_parameter_ = NULL;
+ rest_index_ = -1;
scope_info_ = scope_info;
start_position_ = RelocInfo::kNoPosition;
end_position_ = RelocInfo::kNoPosition;
if (!scope_info.is_null()) {
scope_calls_eval_ = scope_info->CallsEval();
- strict_mode_ = scope_info->strict_mode();
+ language_mode_ = scope_info->language_mode();
}
}
-Scope* Scope::DeserializeScopeChain(Context* context, Scope* script_scope,
- Zone* zone) {
+Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
+ Context* context, Scope* script_scope) {
// Reconstruct the outer scope chain from a closure's context chain.
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
bool contains_with = false;
while (!context->IsNativeContext()) {
if (context->IsWithContext()) {
- Scope* with_scope = new(zone) Scope(current_scope,
- WITH_SCOPE,
- Handle<ScopeInfo>::null(),
- script_scope->ast_value_factory_,
- zone);
+ Scope* with_scope = new (zone)
+ Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>::null(),
+ script_scope->ast_value_factory_);
current_scope = with_scope;
// All the inner scopes are inside a with.
contains_with = true;
@@ -213,41 +204,33 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* script_scope,
}
} else if (context->IsScriptContext()) {
ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
- current_scope = new(zone) Scope(current_scope,
- SCRIPT_SCOPE,
- Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_,
- zone);
+ current_scope = new (zone) Scope(zone, current_scope, SCRIPT_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ script_scope->ast_value_factory_);
} else if (context->IsModuleContext()) {
ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
- current_scope = new(zone) Scope(current_scope,
- MODULE_SCOPE,
- Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_,
- zone);
+ current_scope = new (zone) Scope(zone, current_scope, MODULE_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ script_scope->ast_value_factory_);
} else if (context->IsFunctionContext()) {
ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- current_scope = new(zone) Scope(current_scope,
- FUNCTION_SCOPE,
- Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_,
- zone);
+ current_scope = new (zone) Scope(zone, current_scope, FUNCTION_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ script_scope->ast_value_factory_);
if (scope_info->IsAsmFunction()) current_scope->asm_function_ = true;
if (scope_info->IsAsmModule()) current_scope->asm_module_ = true;
} else if (context->IsBlockContext()) {
ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
- current_scope = new(zone) Scope(current_scope,
- BLOCK_SCOPE,
- Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_,
- zone);
+ current_scope = new (zone)
+ Scope(zone, current_scope, BLOCK_SCOPE, Handle<ScopeInfo>(scope_info),
+ script_scope->ast_value_factory_);
} else {
DCHECK(context->IsCatchContext());
String* name = String::cast(context->extension());
current_scope = new (zone) Scope(
- current_scope,
+ zone, current_scope,
script_scope->ast_value_factory_->GetString(Handle<String>(name)),
- script_scope->ast_value_factory_, zone);
+ script_scope->ast_value_factory_);
}
if (contains_with) current_scope->RecordWithStatement();
if (innermost_scope == NULL) innermost_scope = current_scope;
@@ -289,11 +272,6 @@ bool Scope::Analyze(CompilationInfo* info) {
: FLAG_print_scopes) {
scope->Print();
}
-
- if (FLAG_harmony_modules && FLAG_print_interfaces && top->is_script_scope()) {
- PrintF("global : ");
- top->interface()->Print();
- }
#endif
info->PrepareForCompilation(scope);
@@ -301,7 +279,7 @@ bool Scope::Analyze(CompilationInfo* info) {
}
-void Scope::Initialize() {
+void Scope::Initialize(bool subclass_constructor) {
DCHECK(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
@@ -321,15 +299,21 @@ void Scope::Initialize() {
// such parameter is 'this' which is passed on the stack when
// invoking scripts
if (is_declaration_scope()) {
- Variable* var =
- variables_.Declare(this,
- ast_value_factory_->this_string(),
- VAR,
- false,
- Variable::THIS,
- kCreatedInitialized);
+ DCHECK(!subclass_constructor || is_function_scope());
+ Variable* var = variables_.Declare(
+ this, ast_value_factory_->this_string(),
+ subclass_constructor ? CONST : VAR, false, Variable::THIS,
+ subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
var->AllocateTo(Variable::PARAMETER, -1);
receiver_ = var;
+
+ if (subclass_constructor) {
+ new_target_ = variables_.Declare(
+ this, ast_value_factory_->new_target_string(), CONST, false,
+ Variable::NEW_TARGET, kCreatedInitialized);
+ new_target_->AllocateTo(Variable::PARAMETER, -2);
+ new_target_->set_is_used();
+ }
} else {
DCHECK(outer_scope() != NULL);
receiver_ = outer_scope()->receiver();
@@ -378,8 +362,6 @@ Scope* Scope::FinalizeBlockScope() {
// Propagate usage flags to outer scope.
if (uses_arguments()) outer_scope_->RecordArgumentsUsage();
if (uses_super_property()) outer_scope_->RecordSuperPropertyUsage();
- if (uses_super_constructor_call())
- outer_scope_->RecordSuperConstructorCallUsage();
if (uses_this()) outer_scope_->RecordThisUsage();
return NULL;
@@ -462,11 +444,17 @@ Variable* Scope::Lookup(const AstRawString* name) {
}
-Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode) {
+Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode,
+ bool is_rest) {
DCHECK(!already_resolved());
DCHECK(is_function_scope());
Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
kCreatedInitialized);
+ if (is_rest) {
+ DCHECK_NULL(rest_parameter_);
+ rest_parameter_ = var;
+ rest_index_ = num_parameters();
+ }
params_.Add(var, zone());
return var;
}
@@ -474,8 +462,7 @@ Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode) {
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag,
- Interface* interface) {
+ MaybeAssignedFlag maybe_assigned_flag) {
DCHECK(!already_resolved());
// This function handles VAR, LET, and CONST modes. DYNAMIC variables are
// introduces during variable allocation, INTERNAL variables are allocated
@@ -483,7 +470,7 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
DCHECK(IsDeclaredVariableMode(mode));
++num_var_or_const_;
return variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag,
- maybe_assigned_flag, interface);
+ maybe_assigned_flag);
}
@@ -667,7 +654,7 @@ bool Scope::AllocateVariables(CompilationInfo* info, AstNodeFactory* factory) {
if (!ResolveVariablesRecursively(info, factory)) return false;
// 4) Allocate variables.
- AllocateVariablesRecursively();
+ AllocateVariablesRecursively(info->isolate());
return true;
}
@@ -756,18 +743,17 @@ Scope* Scope::DeclarationScope() {
}
-Handle<ScopeInfo> Scope::GetScopeInfo() {
+Handle<ScopeInfo> Scope::GetScopeInfo(Isolate* isolate) {
if (scope_info_.is_null()) {
- scope_info_ = ScopeInfo::Create(this, zone());
+ scope_info_ = ScopeInfo::Create(isolate, zone(), this);
}
return scope_info_;
}
-void Scope::GetNestedScopeChain(
- List<Handle<ScopeInfo> >* chain,
- int position) {
- if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo()));
+void Scope::GetNestedScopeChain(Isolate* isolate,
+ List<Handle<ScopeInfo> >* chain, int position) {
+ if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo(isolate)));
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* scope = inner_scopes_[i];
@@ -775,7 +761,7 @@ void Scope::GetNestedScopeChain(
int end_pos = scope->end_position();
DCHECK(beg_pos >= 0 && end_pos >= 0);
if (beg_pos <= position && position < end_pos) {
- scope->GetNestedScopeChain(chain, position);
+ scope->GetNestedScopeChain(isolate, chain, position);
return;
}
}
@@ -893,7 +879,9 @@ void Scope::Print(int n) {
if (HasTrivialOuterContext()) {
Indent(n1, "// scope has trivial outer context\n");
}
- if (strict_mode() == STRICT) {
+ if (is_strong(language_mode())) {
+ Indent(n1, "// strong mode scope\n");
+ } else if (is_strict(language_mode())) {
Indent(n1, "// strict mode scope\n");
}
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
@@ -902,17 +890,12 @@ void Scope::Print(int n) {
if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
if (scope_uses_super_property_)
Indent(n1, "// scope uses 'super' property\n");
- if (scope_uses_super_constructor_call_)
- Indent(n1, "// scope uses 'super' constructor\n");
if (scope_uses_this_) Indent(n1, "// scope uses 'this'\n");
if (inner_scope_uses_arguments_) {
Indent(n1, "// inner scope uses 'arguments'\n");
}
if (inner_scope_uses_super_property_)
Indent(n1, "// inner scope uses 'super' property\n");
- if (inner_scope_uses_super_constructor_call_) {
- Indent(n1, "// inner scope uses 'super' constructor\n");
- }
if (inner_scope_uses_this_) Indent(n1, "// inner scope uses 'this'\n");
if (outer_scope_calls_sloppy_eval_) {
Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
@@ -1104,42 +1087,6 @@ bool Scope::ResolveVariable(CompilationInfo* info, VariableProxy* proxy,
DCHECK(var != NULL);
if (proxy->is_assigned()) var->set_maybe_assigned();
- if (FLAG_harmony_modules) {
- bool ok;
-#ifdef DEBUG
- if (FLAG_print_interface_details) {
- PrintF("# Resolve %.*s:\n", var->raw_name()->length(),
- var->raw_name()->raw_data());
- }
-#endif
- proxy->interface()->Unify(var->interface(), zone(), &ok);
- if (!ok) {
-#ifdef DEBUG
- if (FLAG_print_interfaces) {
- PrintF("SCOPES TYPE ERROR\n");
- PrintF("proxy: ");
- proxy->interface()->Print();
- PrintF("var: ");
- var->interface()->Print();
- }
-#endif
-
- // Inconsistent use of module. Throw a syntax error.
- // TODO(rossberg): generate more helpful error message.
- MessageLocation location(
- info->script(), proxy->position(), proxy->position());
- Isolate* isolate = info->isolate();
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(1);
- JSObject::SetElement(array, 0, var->name(), NONE, STRICT).Assert();
- Handle<Object> error;
- MaybeHandle<Object> maybe_error =
- factory->NewSyntaxError("module_type_error", array);
- if (maybe_error.ToHandle(&error)) isolate->Throw(*error, &location);
- return false;
- }
- }
-
proxy->BindTo(var);
return true;
@@ -1189,10 +1136,6 @@ void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
inner->inner_scope_uses_super_property_) {
inner_scope_uses_super_property_ = true;
}
- if (inner->uses_super_constructor_call() ||
- inner->inner_scope_uses_super_constructor_call_) {
- inner_scope_uses_super_constructor_call_ = true;
- }
if (inner->scope_uses_this_ || inner->inner_scope_uses_this_) {
inner_scope_uses_this_ = true;
}
@@ -1211,15 +1154,10 @@ bool Scope::MustAllocate(Variable* var) {
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
// visible name.
- if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
- (var->has_forced_context_allocation() ||
- scope_calls_eval_ ||
- inner_scope_calls_eval_ ||
- scope_contains_with_ ||
- is_catch_scope() ||
- is_block_scope() ||
- is_module_scope() ||
- is_script_scope())) {
+ if ((var->is_this() || var->is_new_target() || !var->raw_name()->IsEmpty()) &&
+ (var->has_forced_context_allocation() || scope_calls_eval_ ||
+ inner_scope_calls_eval_ || scope_contains_with_ || is_catch_scope() ||
+ is_block_scope() || is_module_scope() || is_script_scope())) {
var->set_is_used();
if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned();
}
@@ -1250,10 +1188,10 @@ bool Scope::MustAllocateInContext(Variable* var) {
}
-bool Scope::HasArgumentsParameter() {
+bool Scope::HasArgumentsParameter(Isolate* isolate) {
for (int i = 0; i < params_.length(); i++) {
if (params_[i]->name().is_identical_to(
- isolate_->factory()->arguments_string())) {
+ isolate->factory()->arguments_string())) {
return true;
}
}
@@ -1271,14 +1209,14 @@ void Scope::AllocateHeapSlot(Variable* var) {
}
-void Scope::AllocateParameterLocals() {
+void Scope::AllocateParameterLocals(Isolate* isolate) {
DCHECK(is_function_scope());
Variable* arguments = LookupLocal(ast_value_factory_->arguments_string());
DCHECK(arguments != NULL); // functions have 'arguments' declared implicitly
bool uses_sloppy_arguments = false;
- if (MustAllocate(arguments) && !HasArgumentsParameter()) {
+ if (MustAllocate(arguments) && !HasArgumentsParameter(isolate)) {
// 'arguments' is used. Unless there is also a parameter called
// 'arguments', we must be conservative and allocate all parameters to
// the context assuming they will be captured by the arguments object.
@@ -1295,7 +1233,11 @@ void Scope::AllocateParameterLocals() {
// In strict mode 'arguments' does not alias formal parameters.
// Therefore in strict mode we allocate parameters as if 'arguments'
// were not used.
- uses_sloppy_arguments = strict_mode() == SLOPPY;
+ uses_sloppy_arguments = is_sloppy(language_mode());
+ }
+
+ if (rest_parameter_ && !MustAllocate(rest_parameter_)) {
+ rest_parameter_ = NULL;
}
// The same parameter may occur multiple times in the parameters_ list.
@@ -1304,6 +1246,8 @@ void Scope::AllocateParameterLocals() {
// order is relevant!
for (int i = params_.length() - 1; i >= 0; --i) {
Variable* var = params_[i];
+ if (var == rest_parameter_) continue;
+
DCHECK(var->scope() == this);
if (uses_sloppy_arguments || has_forced_context_allocation()) {
// Force context allocation of the parameter.
@@ -1327,9 +1271,9 @@ void Scope::AllocateParameterLocals() {
}
-void Scope::AllocateNonParameterLocal(Variable* var) {
+void Scope::AllocateNonParameterLocal(Isolate* isolate, Variable* var) {
DCHECK(var->scope() == this);
- DCHECK(!var->IsVariable(isolate_->factory()->dot_result_string()) ||
+ DCHECK(!var->IsVariable(isolate->factory()->dot_result_string()) ||
!var->IsStackLocal());
if (var->IsUnallocated() && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
@@ -1341,14 +1285,14 @@ void Scope::AllocateNonParameterLocal(Variable* var) {
}
-void Scope::AllocateNonParameterLocals() {
+void Scope::AllocateNonParameterLocals(Isolate* isolate) {
// All variables that have no rewrite yet are non-parameter locals.
for (int i = 0; i < temps_.length(); i++) {
- AllocateNonParameterLocal(temps_[i]);
+ AllocateNonParameterLocal(isolate, temps_[i]);
}
for (int i = 0; i < internals_.length(); i++) {
- AllocateNonParameterLocal(internals_[i]);
+ AllocateNonParameterLocal(isolate, internals_[i]);
}
ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
@@ -1361,7 +1305,7 @@ void Scope::AllocateNonParameterLocals() {
vars.Sort(VarAndOrder::Compare);
int var_count = vars.length();
for (int i = 0; i < var_count; i++) {
- AllocateNonParameterLocal(vars[i].var());
+ AllocateNonParameterLocal(isolate, vars[i].var());
}
// For now, function_ must be allocated at the very end. If it gets
@@ -1369,15 +1313,19 @@ void Scope::AllocateNonParameterLocals() {
// because of the current ScopeInfo implementation (see
// ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
if (function_ != NULL) {
- AllocateNonParameterLocal(function_->proxy()->var());
+ AllocateNonParameterLocal(isolate, function_->proxy()->var());
+ }
+
+ if (rest_parameter_) {
+ AllocateNonParameterLocal(isolate, rest_parameter_);
}
}
-void Scope::AllocateVariablesRecursively() {
+void Scope::AllocateVariablesRecursively(Isolate* isolate) {
// Allocate variables for inner scopes.
for (int i = 0; i < inner_scopes_.length(); i++) {
- inner_scopes_[i]->AllocateVariablesRecursively();
+ inner_scopes_[i]->AllocateVariablesRecursively(isolate);
}
// If scope is already resolved, we still need to allocate
@@ -1389,8 +1337,8 @@ void Scope::AllocateVariablesRecursively() {
// Allocate variables for this scope.
// Parameters must be allocated first, if any.
- if (is_function_scope()) AllocateParameterLocals();
- AllocateNonParameterLocals();
+ if (is_function_scope()) AllocateParameterLocals(isolate);
+ AllocateNonParameterLocals(isolate);
// Force allocation of a context for this scope if necessary. For a 'with'
// scope and for a function scope that makes an 'eval' call we need a context,
@@ -1413,7 +1361,7 @@ void Scope::AllocateVariablesRecursively() {
void Scope::AllocateModulesRecursively(Scope* host_scope) {
if (already_resolved()) return;
if (is_module_scope()) {
- DCHECK(interface_->IsFrozen());
+ DCHECK(module_descriptor_->IsFrozen());
DCHECK(module_var_ == NULL);
module_var_ =
host_scope->NewInternal(ast_value_factory_->dot_module_string());
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 8d79006878..c58d124939 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -24,8 +24,7 @@ class VariableMap: public ZoneHashMap {
Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode,
bool is_valid_lhs, Variable::Kind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- Interface* interface = Interface::NewValue());
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
Variable* Lookup(const AstRawString* name);
@@ -72,23 +71,23 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Construction
- Scope(Scope* outer_scope, ScopeType scope_type,
- AstValueFactory* value_factory, Zone* zone);
+ Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
+ AstValueFactory* value_factory);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
static bool Analyze(CompilationInfo* info);
- static Scope* DeserializeScopeChain(Context* context, Scope* script_scope,
- Zone* zone);
+ static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
+ Context* context, Scope* script_scope);
// The scope name is only used for printing/debugging.
void SetScopeName(const AstRawString* scope_name) {
scope_name_ = scope_name;
}
- void Initialize();
+ void Initialize(bool subclass_constructor = false);
// Checks if the block scope is redundant, i.e. it does not contain any
// block scoped declarations. In that case it is removed from the scope
@@ -125,14 +124,14 @@ class Scope: public ZoneObject {
// Declare a parameter in this scope. When there are duplicated
// parameters the rightmost one 'wins'. However, the implementation
// expects all parameters to be declared and from left to right.
- Variable* DeclareParameter(const AstRawString* name, VariableMode mode);
+ Variable* DeclareParameter(const AstRawString* name, VariableMode mode,
+ bool is_rest = false);
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- Interface* interface = Interface::NewValue());
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// Declare an implicit global variable in this scope which must be a
// script scope. The variable was introduced (possibly from an inner
@@ -143,14 +142,12 @@ class Scope: public ZoneObject {
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
- Interface* interface = Interface::NewValue(),
int position = RelocInfo::kNoPosition) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
DCHECK(!already_resolved());
- VariableProxy* proxy =
- factory->NewVariableProxy(name, false, interface, position);
+ VariableProxy* proxy = factory->NewVariableProxy(name, false, position);
unresolved_.Add(proxy, zone_);
return proxy;
}
@@ -216,16 +213,13 @@ class Scope: public ZoneObject {
// Inform the scope that the corresponding code uses "super".
void RecordSuperPropertyUsage() { scope_uses_super_property_ = true; }
- // Inform the scope that the corresponding code invokes "super" constructor.
- void RecordSuperConstructorCallUsage() {
- scope_uses_super_constructor_call_ = true;
- }
-
// Inform the scope that the corresponding code uses "this".
void RecordThisUsage() { scope_uses_this_ = true; }
- // Set the strict mode flag (unless disabled by a global flag).
- void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
+ // Set the language mode flag (unless disabled by a global flag).
+ void SetLanguageMode(LanguageMode language_mode) {
+ language_mode_ = language_mode;
+ }
// Set the ASM module flag.
void SetAsmModule() { asm_module_ = true; }
@@ -289,13 +283,13 @@ class Scope: public ZoneObject {
is_module_scope() || is_script_scope();
}
bool is_strict_eval_scope() const {
- return is_eval_scope() && strict_mode_ == STRICT;
+ return is_eval_scope() && is_strict(language_mode_);
}
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
bool calls_sloppy_eval() {
- return scope_calls_eval_ && strict_mode_ == SLOPPY;
+ return scope_calls_eval_ && is_sloppy(language_mode_);
}
bool outer_scope_calls_sloppy_eval() const {
return outer_scope_calls_sloppy_eval_;
@@ -318,14 +312,6 @@ class Scope: public ZoneObject {
bool inner_uses_super_property() const {
return inner_scope_uses_super_property_;
}
- // Does this scope calls "super" constructor.
- bool uses_super_constructor_call() const {
- return scope_uses_super_constructor_call_;
- }
- // Does any inner scope calls "super" constructor.
- bool inner_uses_super_constructor_call() const {
- return inner_scope_uses_super_constructor_call_;
- }
// Does this scope access "this".
bool uses_this() const { return scope_uses_this_; }
// Does any inner scope access "this".
@@ -338,11 +324,14 @@ class Scope: public ZoneObject {
ScopeType scope_type() const { return scope_type_; }
// The language mode of this scope.
- StrictMode strict_mode() const { return strict_mode_; }
+ LanguageMode language_mode() const { return language_mode_; }
- // The variable corresponding the 'this' value.
+ // The variable corresponding to the 'this' value.
Variable* receiver() { return receiver_; }
+ // The variable corresponding to the 'new.target' value.
+ Variable* new_target_var() { return new_target_; }
+
// The variable holding the function literal for named function
// literals, or NULL. Only valid for function scopes.
VariableDeclaration* function() const {
@@ -357,8 +346,36 @@ class Scope: public ZoneObject {
return params_[index];
}
+ // Returns the default function arity --- does not include rest parameters.
+ int default_function_length() const {
+ int count = params_.length();
+ if (rest_index_ >= 0) {
+ DCHECK(count > 0);
+ DCHECK(is_function_scope());
+ --count;
+ }
+ return count;
+ }
+
int num_parameters() const { return params_.length(); }
+ // A function can have at most one rest parameter. Returns Variable* or NULL.
+ Variable* rest_parameter(int* index) const {
+ *index = rest_index_;
+ if (rest_index_ < 0) return NULL;
+ return rest_parameter_;
+ }
+
+ bool has_rest_parameter() const {
+ return rest_index_ >= 0;
+ }
+
+ bool is_simple_parameter_list() const {
+ DCHECK(is_function_scope());
+ if (rest_index_ >= 0) return false;
+ return true;
+ }
+
// The local variable 'arguments' if we need to allocate it; NULL otherwise.
Variable* arguments() const { return arguments_; }
@@ -371,8 +388,8 @@ class Scope: public ZoneObject {
// The scope immediately surrounding this scope, or NULL.
Scope* outer_scope() const { return outer_scope_; }
- // The interface as inferred so far; only for module scopes.
- Interface* interface() const { return interface_; }
+ // The ModuleDescriptor for this scope; only for module scopes.
+ ModuleDescriptor* module() const { return module_descriptor_; }
// ---------------------------------------------------------------------------
// Variable allocation.
@@ -426,13 +443,13 @@ class Scope: public ZoneObject {
// where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
- Handle<ScopeInfo> GetScopeInfo();
+ Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
// Get the chain of nested scopes within this scope for the source statement
// position. The scopes will be added to the list from the outermost scope to
// the innermost scope. Only nested block, catch or with scopes are tracked
// and will be returned, but no inner function scopes.
- void GetNestedScopeChain(List<Handle<ScopeInfo> >* chain,
+ void GetNestedScopeChain(Isolate* isolate, List<Handle<ScopeInfo> >* chain,
int statement_position);
// ---------------------------------------------------------------------------
@@ -446,6 +463,13 @@ class Scope: public ZoneObject {
return variables_.Lookup(name) != NULL;
}
+ bool IsDeclaredParameter(const AstRawString* name) {
+ // If IsSimpleParameterList is false, duplicate parameters are not allowed,
+ // however `arguments` may be allowed if function is not strict code. Thus,
+ // the assumptions explained above do not hold.
+ return params_.Contains(variables_.Lookup(name));
+ }
+
// ---------------------------------------------------------------------------
// Debugging.
@@ -458,8 +482,6 @@ class Scope: public ZoneObject {
protected:
friend class ParserFactory;
- Isolate* const isolate_;
-
// Scope tree.
Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
@@ -492,10 +514,12 @@ class Scope: public ZoneObject {
Variable* receiver_;
// Function variable, if any; function scopes only.
VariableDeclaration* function_;
+ // new.target variable, function scopes only.
+ Variable* new_target_;
// Convenience variable; function scopes only.
Variable* arguments_;
- // Interface; module scopes only.
- Interface* interface_;
+ // Module descriptor; module scopes only.
+ ModuleDescriptor* module_descriptor_;
// Illegal redeclaration.
Expression* illegal_redecl_;
@@ -513,16 +537,14 @@ class Scope: public ZoneObject {
bool scope_uses_arguments_;
// This scope uses "super" property ('super.foo').
bool scope_uses_super_property_;
- // This scope uses "super" constructor ('super(..)').
- bool scope_uses_super_constructor_call_;
// This scope uses "this".
bool scope_uses_this_;
// This scope contains an "use asm" annotation.
bool asm_module_;
// This scope's outer context is an asm module.
bool asm_function_;
- // The strict mode of this scope.
- StrictMode strict_mode_;
+ // The language mode of this scope.
+ LanguageMode language_mode_;
// Source positions.
int start_position_;
int end_position_;
@@ -532,7 +554,6 @@ class Scope: public ZoneObject {
bool inner_scope_calls_eval_;
bool inner_scope_uses_arguments_;
bool inner_scope_uses_super_property_;
- bool inner_scope_uses_super_constructor_call_;
bool inner_scope_uses_this_;
bool force_eager_compilation_;
bool force_context_allocation_;
@@ -554,6 +575,10 @@ class Scope: public ZoneObject {
// For module scopes, the host scope's internal variable binding this module.
Variable* module_var_;
+ // Rest parameter
+ Variable* rest_parameter_;
+ int rest_index_;
+
// Serialized scope info support.
Handle<ScopeInfo> scope_info_;
bool already_resolved() { return already_resolved_; }
@@ -627,15 +652,15 @@ class Scope: public ZoneObject {
// Predicates.
bool MustAllocate(Variable* var);
bool MustAllocateInContext(Variable* var);
- bool HasArgumentsParameter();
+ bool HasArgumentsParameter(Isolate* isolate);
// Variable allocation.
void AllocateStackSlot(Variable* var);
void AllocateHeapSlot(Variable* var);
- void AllocateParameterLocals();
- void AllocateNonParameterLocal(Variable* var);
- void AllocateNonParameterLocals();
- void AllocateVariablesRecursively();
+ void AllocateParameterLocals(Isolate* isolate);
+ void AllocateNonParameterLocal(Isolate* isolate, Variable* var);
+ void AllocateNonParameterLocals(Isolate* isolate);
+ void AllocateVariablesRecursively(Isolate* isolate);
void AllocateModulesRecursively(Scope* host_scope);
// Resolve and fill in the allocation information for all variables
@@ -651,13 +676,12 @@ class Scope: public ZoneObject {
private:
// Construct a scope based on the scope info.
- Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
- AstValueFactory* value_factory, Zone* zone);
+ Scope(Zone* zone, Scope* inner_scope, ScopeType type,
+ Handle<ScopeInfo> scope_info, AstValueFactory* value_factory);
// Construct a catch scope with a binding for the name.
- Scope(Scope* inner_scope,
- const AstRawString* catch_variable_name,
- AstValueFactory* value_factory, Zone* zone);
+ Scope(Zone* zone, Scope* inner_scope, const AstRawString* catch_variable_name,
+ AstValueFactory* value_factory);
void AddInnerScope(Scope* inner_scope) {
if (inner_scope != NULL) {
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index c5f8b6344e..28480e649c 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
@@ -95,12 +96,12 @@ void ExternalReferenceTable::Add(Address address,
TypeCode type,
uint16_t id,
const char* name) {
- DCHECK_NE(NULL, address);
+ DCHECK_NOT_NULL(address);
ExternalReferenceEntry entry;
entry.address = address;
entry.code = EncodeExternal(type, id);
entry.name = name;
- DCHECK_NE(0, entry.code);
+ DCHECK_NE(0u, entry.code);
// Assert that the code is added in ascending order to rule out duplicates.
DCHECK((size() == 0) || (code(size() - 1) < entry.code));
refs_.Add(entry);
@@ -163,8 +164,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
"std::log");
Add(ExternalReference::store_buffer_top(isolate).address(),
"store_buffer_top");
- Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
- "canonical_nan");
Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
Add(ExternalReference::get_date_field_function(isolate).address(),
"JSDate::GetField");
@@ -614,7 +613,9 @@ void Deserializer::DecodeReservation(
DCHECK_EQ(0, reservations_[NEW_SPACE].length());
STATIC_ASSERT(NEW_SPACE == 0);
int current_space = NEW_SPACE;
- for (const auto& r : res) {
+ for (int i = 0; i < res.length(); i++) {
+ SerializedData::Reservation r(0);
+ memcpy(&r, res.start() + i, sizeof(r));
reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
if (r.is_last()) current_space++;
}
@@ -633,6 +634,11 @@ void Deserializer::FlushICacheForNewCodeObjects() {
bool Deserializer::ReserveSpace() {
+#ifdef DEBUG
+ for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
+ CHECK(reservations_[i].length() > 0);
+ }
+#endif // DEBUG
if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
high_water_[i] = reservations_[i][0].start;
@@ -641,19 +647,25 @@ bool Deserializer::ReserveSpace() {
}
-void Deserializer::Deserialize(Isolate* isolate) {
+void Deserializer::Initialize(Isolate* isolate) {
+ DCHECK_NULL(isolate_);
+ DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
- DCHECK(isolate_ != NULL);
- if (!ReserveSpace()) FatalProcessOutOfMemory("deserializing context");
+ DCHECK_NULL(external_reference_decoder_);
+ external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
+}
+
+
+void Deserializer::Deserialize(Isolate* isolate) {
+ Initialize(isolate);
+ if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
// No active threads.
- DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
+ DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
- DCHECK_EQ(NULL, external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
isolate_->heap()->IterateSmiRoots(this);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
- isolate_->heap()->RepairFreeListsAfterBoot();
+ isolate_->heap()->RepairFreeListsAfterDeserialization();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
isolate_->heap()->set_native_contexts_list(
@@ -668,8 +680,6 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->undefined_value());
}
- isolate_->heap()->InitializeWeakObjectToCodeTable();
-
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
@@ -686,33 +696,52 @@ void Deserializer::Deserialize(Isolate* isolate) {
}
-void Deserializer::DeserializePartial(Isolate* isolate, Object** root,
- OnOOM on_oom) {
- isolate_ = isolate;
- for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
- DCHECK(reservations_[i].length() > 0);
- }
+MaybeHandle<Object> Deserializer::DeserializePartial(
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ Handle<FixedArray>* outdated_contexts_out) {
+ Initialize(isolate);
if (!ReserveSpace()) {
- if (on_oom == FATAL_ON_OOM) FatalProcessOutOfMemory("deserialize context");
- *root = NULL;
- return;
- }
- if (external_reference_decoder_ == NULL) {
- external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
+ V8::FatalProcessOutOfMemory("deserialize context");
+ return MaybeHandle<Object>();
}
- DisallowHeapAllocation no_gc;
+ Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
+ attached_objects[kGlobalProxyReference] = global_proxy;
+ SetAttachedObjects(attached_objects);
+ DisallowHeapAllocation no_gc;
// Keep track of the code space start and end pointers in case new
// code objects were unserialized
OldSpace* code_space = isolate_->heap()->code_space();
Address start_address = code_space->top();
- VisitPointer(root);
+ Object* root;
+ Object* outdated_contexts;
+ VisitPointer(&root);
+ VisitPointer(&outdated_contexts);
// There's no code deserialized here. If this assert fires
// then that's changed and logging should be added to notify
// the profiler et al of the new code.
CHECK_EQ(start_address, code_space->top());
+ CHECK(outdated_contexts->IsFixedArray());
+ *outdated_contexts_out =
+ Handle<FixedArray>(FixedArray::cast(outdated_contexts), isolate);
+ return Handle<Object>(root, isolate);
+}
+
+
+MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
+ Isolate* isolate) {
+ Initialize(isolate);
+ if (!ReserveSpace()) {
+ return Handle<SharedFunctionInfo>();
+ } else {
+ deserializing_user_code_ = true;
+ DisallowHeapAllocation no_gc;
+ Object* root;
+ VisitPointer(&root);
+ return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+ }
}
@@ -723,7 +752,7 @@ Deserializer::~Deserializer() {
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
}
- if (attached_objects_) attached_objects_->Dispose();
+ attached_objects_.Dispose();
}
@@ -798,11 +827,12 @@ HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
HeapObject* Deserializer::GetBackReferencedObject(int space) {
HeapObject* obj;
+ BackReference back_reference(source_.GetInt());
if (space == LO_SPACE) {
- uint32_t index = source_.GetInt();
+ CHECK(back_reference.chunk_index() == 0);
+ uint32_t index = back_reference.large_object_index();
obj = deserialized_large_objects_[index];
} else {
- BackReference back_reference(source_.GetInt());
DCHECK(space < kNumberOfPreallocatedSpaces);
uint32_t chunk_index = back_reference.chunk_index();
DCHECK_LE(chunk_index, current_chunk_[space]);
@@ -860,7 +890,8 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
// Fix up strings from serialized user code.
if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
- *write_back = obj;
+ Object* write_back_obj = obj;
+ UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
if (obj->IsCode()) {
DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
@@ -868,6 +899,23 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
DCHECK(space_number != CODE_SPACE);
}
#endif
+#if V8_TARGET_ARCH_PPC && \
+ (ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL)
+ // If we're on a platform that uses function descriptors
+ // these jump tables make use of RelocInfo::INTERNAL_REFERENCE.
+ // As the V8 serialization code doesn't handle that relocation type
+ // we use this to fix up code that has function descriptors.
+ if (space_number == CODE_SPACE) {
+ Code* code = reinterpret_cast<Code*>(HeapObject::FromAddress(address));
+ for (RelocIterator it(code); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ Assembler::RelocateInternalReference(it.rinfo()->pc(), 0,
+ code->instruction_start());
+ }
+ }
+ }
+#endif
}
@@ -894,7 +942,7 @@ Address Deserializer::Allocate(int space_index, int size) {
} else {
DCHECK(space_index < kNumberOfPreallocatedSpaces);
Address address = high_water_[space_index];
- DCHECK_NE(NULL, address);
+ DCHECK_NOT_NULL(address);
high_water_[space_index] += size;
#ifdef DEBUG
// Assert that the current reserved chunk is still big enough.
@@ -970,9 +1018,9 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
new_object = isolate->builtins()->builtin(name); \
emit_write_barrier = false; \
} else if (where == kAttachedReference) { \
- DCHECK(deserializing_user_code()); \
int index = source_.GetInt(); \
- new_object = *attached_objects_->at(index); \
+ DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \
+ new_object = *attached_objects_[index]; \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else { \
DCHECK(where == kBackrefWithSkip); \
@@ -1003,7 +1051,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
current = reinterpret_cast<Object**>(location_of_branch_data); \
current_was_incremented = true; \
} else { \
- *current = new_object; \
+ UnalignedCopy(current, &new_object); \
} \
} \
if (emit_write_barrier && write_barrier_needed) { \
@@ -1104,7 +1152,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
int root_id = RootArrayConstantFromByteCode(data);
Object* object = isolate->heap()->roots_array_start()[root_id];
DCHECK(!isolate->heap()->InNewSpace(object));
- *current++ = object;
+ UnalignedCopy(current++, &object);
break;
}
@@ -1116,7 +1164,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
reinterpret_cast<intptr_t>(current) + skip);
Object* object = isolate->heap()->roots_array_start()[root_id];
DCHECK(!isolate->heap()->InNewSpace(object));
- *current++ = object;
+ UnalignedCopy(current++, &object);
break;
}
@@ -1124,8 +1172,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
int repeats = source_.GetInt();
Object* object = current[-1];
DCHECK(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) current[i] = object;
- current += repeats;
+ for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
break;
}
@@ -1139,10 +1186,10 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kFixedRepeat + 13:
case kFixedRepeat + 14: {
int repeats = RepeatsForCode(data);
- Object* object = current[-1];
+ Object* object;
+ UnalignedCopy(&object, current - 1);
DCHECK(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) current[i] = object;
- current += repeats;
+ for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
break;
}
@@ -1161,16 +1208,16 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
- defined(V8_TARGET_ARCH_MIPS64)
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL
// Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS or ARM
- // with ool constant pool, and omitted on the other architectures because
- // it is fully unrolled and would cause bloat.
+ // a pointer to it to the current object. Required only for MIPS, PPC or
+ // ARM with ool constant pool, and omitted on the other architectures
+ // because it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
- // object. Required only for MIPS or ARM with ool constant pool.
+ // object. Required only for MIPS, PPC or ARM with ool constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
@@ -1187,7 +1234,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
- defined(V8_TARGET_ARCH_MIPS64)
+ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC)
// Find an object in the roots array and write a pointer to it to in code.
CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
@@ -1248,13 +1295,14 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
}
case kNativesStringResource: {
+ DCHECK(!isolate_->heap()->deserialization_complete());
int index = source_.Get();
Vector<const char> source_vector = Natives::GetScriptSource(index);
NativesExternalStringResource* resource =
- new NativesExternalStringResource(isolate->bootstrapper(),
- source_vector.start(),
+ new NativesExternalStringResource(source_vector.start(),
source_vector.length());
- *current++ = reinterpret_cast<Object*>(resource);
+ Object* resource_obj = reinterpret_cast<Object*>(resource);
+ UnalignedCopy(current++, &resource_obj);
break;
}
@@ -1267,7 +1315,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
// Move to next reserved chunk.
chunk_index = ++current_chunk_[space];
- DCHECK_LT(chunk_index, reservation.length());
+ CHECK_LT(chunk_index, reservation.length());
high_water_[space] = reservation[chunk_index].start;
break;
}
@@ -1282,8 +1330,9 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
FOUR_CASES(kHotObject)
FOUR_CASES(kHotObject + 4) {
int index = data & kHotObjectIndexMask;
- *current = hot_objects_.Get(index);
- if (write_barrier_needed && isolate->heap()->InNewSpace(*current)) {
+ Object* hot_object = hot_objects_.Get(index);
+ UnalignedCopy(current, &hot_object);
+ if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
Address current_address = reinterpret_cast<Address>(current);
isolate->heap()->RecordWrite(
current_object_address,
@@ -1296,14 +1345,14 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kSynchronize: {
// If we get here then that indicates that you have a mismatch between
// the number of GC roots when serializing and deserializing.
- UNREACHABLE();
+ CHECK(false);
}
default:
- UNREACHABLE();
+ CHECK(false);
}
}
- DCHECK_EQ(limit, current);
+ CHECK_EQ(limit, current);
}
@@ -1334,7 +1383,7 @@ Serializer::~Serializer() {
void StartupSerializer::SerializeStrongReferences() {
Isolate* isolate = this->isolate();
// No active threads.
- CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse());
+ CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
@@ -1367,12 +1416,46 @@ void StartupSerializer::VisitPointers(Object** start, Object** end) {
}
-void PartialSerializer::Serialize(Object** object) {
- this->VisitPointer(object);
+void PartialSerializer::Serialize(Object** o) {
+ if ((*o)->IsContext()) {
+ Context* context = Context::cast(*o);
+ global_object_ = context->global_object();
+ back_reference_map()->AddGlobalProxy(context->global_proxy());
+ }
+ VisitPointer(o);
+ SerializeOutdatedContextsAsFixedArray();
Pad();
}
+void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
+ int length = outdated_contexts_.length();
+ if (length == 0) {
+ FixedArray* empty = isolate_->heap()->empty_fixed_array();
+ SerializeObject(empty, kPlain, kStartOfObject, 0);
+ } else {
+ // Serialize an imaginary fixed array containing outdated contexts.
+ int size = FixedArray::SizeFor(length);
+ Allocate(NEW_SPACE, size);
+ sink_->Put(kNewObject + NEW_SPACE, "emulated FixedArray");
+ sink_->PutInt(size >> kObjectAlignmentBits, "FixedArray size in words");
+ Map* map = isolate_->heap()->fixed_array_map();
+ SerializeObject(map, kPlain, kStartOfObject, 0);
+ Smi* length_smi = Smi::FromInt(length);
+ sink_->Put(kOnePointerRawData, "Smi");
+ for (int i = 0; i < kPointerSize; i++) {
+ sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
+ }
+ for (int i = 0; i < length; i++) {
+ BackReference back_ref = outdated_contexts_[i];
+ DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
+ sink_->Put(kBackref + back_ref.space(), "BackRef");
+ sink_->PutInt(back_ref.reference(), "BackRefValue");
+ }
+ }
+}
+
+
bool Serializer::ShouldBeSkipped(Object** current) {
Object** roots = isolate()->heap()->roots_array_start();
return current == &roots[Heap::kStoreBufferTopRootIndex]
@@ -1464,6 +1547,26 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
}
+#ifdef DEBUG
+bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
+ DCHECK(reference.is_valid());
+ DCHECK(!reference.is_source());
+ DCHECK(!reference.is_global_proxy());
+ AllocationSpace space = reference.space();
+ int chunk_index = reference.chunk_index();
+ if (space == LO_SPACE) {
+ return chunk_index == 0 &&
+ reference.large_object_index() < seen_large_objects_index_;
+ } else if (chunk_index == completed_chunks_[space].length()) {
+ return reference.chunk_offset() < pending_chunk_[space];
+ } else {
+ return chunk_index < completed_chunks_[space].length() &&
+ reference.chunk_offset() < completed_chunks_[space][chunk_index];
+ }
+}
+#endif // DEBUG
+
+
bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (how_to_code == kPlain && where_to_point == kStartOfObject) {
@@ -1495,8 +1598,14 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
- sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source");
- sink_->PutInt(kSourceObjectReference, "kSourceObjectIndex");
+ sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
+ sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
+ } else if (back_reference.is_global_proxy()) {
+ FlushSkip(skip);
+ if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
+ DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
+ sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
+ sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
} else {
if (FLAG_trace_serializer) {
PrintF(" Encoding back reference to: ");
@@ -1512,6 +1621,7 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
"BackRefWithSkip");
sink_->PutInt(skip, "BackRefSkipDistance");
}
+ DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
sink_->PutInt(back_reference.reference(), "BackRefValue");
hot_objects_.Add(obj);
@@ -1598,6 +1708,9 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
}
+ // Replace typed arrays by undefined.
+ if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
+
int root_index = root_index_map_.Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
@@ -1629,6 +1742,15 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
serializer.Serialize();
+
+ if (obj->IsContext() &&
+ Context::cast(obj)->global_object() == global_object_) {
+ // Context refers to the current global object. This reference will
+ // become outdated after deserialization.
+ BackReference back_reference = back_reference_map_.Lookup(obj);
+ DCHECK(back_reference.is_valid());
+ outdated_contexts_.Add(back_reference);
+ }
}
@@ -1748,6 +1870,9 @@ void Serializer::ObjectSerializer::Serialize() {
PrintF("\n");
}
+ // We cannot serialize typed array objects correctly.
+ DCHECK(!object_->IsJSTypedArray());
+
if (object_->IsScript()) {
// Clear cached line ends.
Object* undefined = serializer_->isolate()->heap()->undefined_value();
@@ -1994,6 +2119,10 @@ int Serializer::ObjectSerializer::OutputRawData(
}
const char* description = code_object_ ? "Code" : "Byte";
+#ifdef MEMORY_SANITIZER
+ // Object sizes are usually rounded up with uninitialized padding space.
+ MSAN_MEMORY_IS_INITIALIZED(object_start + base, bytes_to_output);
+#endif // MEMORY_SANITIZER
sink_->PutRaw(object_start + base, bytes_to_output, description);
if (code_object_) delete[] object_start;
}
@@ -2261,67 +2390,52 @@ int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
}
-void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
- WhereToPoint where_to_point) {
- if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
-
- DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
- sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source");
- sink_->PutInt(kSourceObjectIndex, "kSourceObjectIndex");
-}
-
-
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
- Object* root;
+ HandleScope scope(isolate);
- {
- HandleScope scope(isolate);
+ SmartPointer<SerializedCodeData> scd(
+ SerializedCodeData::FromCachedData(cached_data, *source));
+ if (scd.is_empty()) {
+ if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
+ DCHECK(cached_data->rejected());
+ return MaybeHandle<SharedFunctionInfo>();
+ }
- SmartPointer<SerializedCodeData> scd(
- SerializedCodeData::FromCachedData(cached_data, *source));
- if (scd.is_empty()) {
- if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
- DCHECK(cached_data->rejected());
- return MaybeHandle<SharedFunctionInfo>();
- }
+ // Eagerly expand string table to avoid allocations during deserialization.
+ StringTable::EnsureCapacityForDeserialization(isolate,
+ scd->NumInternalizedStrings());
- // Eagerly expand string table to avoid allocations during deserialization.
- StringTable::EnsureCapacityForDeserialization(
- isolate, scd->NumInternalizedStrings());
-
- // Prepare and register list of attached objects.
- Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
- Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
- code_stub_keys.length() + kCodeStubsBaseIndex);
- attached_objects[kSourceObjectIndex] = source;
- for (int i = 0; i < code_stub_keys.length(); i++) {
- attached_objects[i + kCodeStubsBaseIndex] =
- CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
- }
+ // Prepare and register list of attached objects.
+ Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
+ Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
+ code_stub_keys.length() + kCodeStubsBaseIndex);
+ attached_objects[kSourceObjectIndex] = source;
+ for (int i = 0; i < code_stub_keys.length(); i++) {
+ attached_objects[i + kCodeStubsBaseIndex] =
+ CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
+ }
- Deserializer deserializer(scd.get());
- deserializer.SetAttachedObjects(&attached_objects);
+ Deserializer deserializer(scd.get());
+ deserializer.SetAttachedObjects(attached_objects);
- // Deserialize.
- deserializer.DeserializePartial(isolate, &root, Deserializer::NULL_ON_OOM);
- if (root == NULL) {
- // Deserializing may fail if the reservations cannot be fulfilled.
- if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
- return MaybeHandle<SharedFunctionInfo>();
- }
- deserializer.FlushICacheForNewCodeObjects();
+ // Deserialize.
+ Handle<SharedFunctionInfo> result;
+ if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
+ // Deserializing may fail if the reservations cannot be fulfilled.
+ if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
+ return MaybeHandle<SharedFunctionInfo>();
}
+ deserializer.FlushICacheForNewCodeObjects();
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int length = cached_data->length();
PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
}
- Handle<SharedFunctionInfo> result(SharedFunctionInfo::cast(root), isolate);
result->set_deserialized(true);
if (isolate->logger()->is_logging_code_events() ||
@@ -2335,7 +2449,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
*result, NULL, name);
}
- return result;
+ return scope.CloseAndEscape(result);
}
@@ -2348,12 +2462,11 @@ void SerializedData::AllocateData(int size) {
}
-SnapshotData::SnapshotData(const SnapshotByteSink& sink,
- const Serializer& ser) {
+SnapshotData::SnapshotData(const Serializer& ser) {
DisallowHeapAllocation no_gc;
List<Reservation> reservations;
ser.EncodeReservations(&reservations);
- const List<byte>& payload = sink.data();
+ const List<byte>& payload = ser.sink()->data();
// Calculate sizes.
int reservation_size = reservations.length() * kInt32Size;
@@ -2364,7 +2477,7 @@ SnapshotData::SnapshotData(const SnapshotByteSink& sink,
// Set header values.
SetHeaderValue(kCheckSumOffset, Version::Hash());
- SetHeaderValue(kReservationsOffset, reservations.length());
+ SetHeaderValue(kNumReservationsOffset, reservations.length());
SetHeaderValue(kPayloadLengthOffset, payload.length());
// Copy reservation chunk sizes.
@@ -2385,12 +2498,12 @@ bool SnapshotData::IsSane() {
Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
return Vector<const Reservation>(
reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kReservationsOffset));
+ GetHeaderValue(kNumReservationsOffset));
}
Vector<const byte> SnapshotData::Payload() const {
- int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+ int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
const byte* payload = data_ + kHeaderSize + reservations_size;
int length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
@@ -2445,19 +2558,22 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
int reservation_size = reservations.length() * kInt32Size;
int num_stub_keys = stub_keys->length();
int stub_keys_size = stub_keys->length() * kInt32Size;
- int size = kHeaderSize + reservation_size + stub_keys_size + payload.length();
+ int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
+ int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
+ int size = padded_payload_offset + payload.length();
// Allocate backing store and create result data.
AllocateData(size);
// Set header values.
+ SetHeaderValue(kMagicNumberOffset, kMagicNumber);
SetHeaderValue(kVersionHashOffset, Version::Hash());
SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
SetHeaderValue(kCpuFeaturesOffset,
static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
- SetHeaderValue(kReservationsOffset, reservations.length());
+ SetHeaderValue(kNumReservationsOffset, reservations.length());
SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
SetHeaderValue(kPayloadLengthOffset, payload.length());
@@ -2473,20 +2589,32 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
CopyBytes(data_ + kHeaderSize + reservation_size,
reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
+ memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
+
// Copy serialized data.
- CopyBytes(data_ + kHeaderSize + reservation_size + stub_keys_size,
- payload.begin(), static_cast<size_t>(payload.length()));
+ CopyBytes(data_ + padded_payload_offset, payload.begin(),
+ static_cast<size_t>(payload.length()));
}
-bool SerializedCodeData::IsSane(String* source) const {
- return GetHeaderValue(kVersionHashOffset) == Version::Hash() &&
- GetHeaderValue(kSourceHashOffset) == SourceHash(source) &&
- GetHeaderValue(kCpuFeaturesOffset) ==
- static_cast<uint32_t>(CpuFeatures::SupportedFeatures()) &&
- GetHeaderValue(kFlagHashOffset) == FlagList::Hash() &&
- Checksum(Payload()).Check(GetHeaderValue(kChecksum1Offset),
- GetHeaderValue(kChecksum2Offset));
+SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
+ String* source) const {
+ uint32_t magic_number = GetHeaderValue(kMagicNumberOffset);
+ uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
+ uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
+ uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
+ uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
+ uint32_t c1 = GetHeaderValue(kChecksum1Offset);
+ uint32_t c2 = GetHeaderValue(kChecksum2Offset);
+ if (magic_number != kMagicNumber) return MAGIC_NUMBER_MISMATCH;
+ if (version_hash != Version::Hash()) return VERSION_MISMATCH;
+ if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
+ if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
+ return CPU_FEATURES_MISMATCH;
+ }
+ if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
+ if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
+ return CHECK_SUCCESS;
}
@@ -2505,15 +2633,17 @@ Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
const {
return Vector<const Reservation>(
reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
- GetHeaderValue(kReservationsOffset));
+ GetHeaderValue(kNumReservationsOffset));
}
Vector<const byte> SerializedCodeData::Payload() const {
- int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+ int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
- const byte* payload =
- data_ + kHeaderSize + reservations_size + code_stubs_size;
+ int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
+ int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
+ const byte* payload = data_ + padded_payload_offset;
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
int length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
return Vector<const byte>(payload, length);
@@ -2525,9 +2655,26 @@ int SerializedCodeData::NumInternalizedStrings() const {
}
Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
- int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+ int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
const byte* start = data_ + kHeaderSize + reservations_size;
return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
GetHeaderValue(kNumCodeStubKeysOffset));
}
+
+
+SerializedCodeData::SerializedCodeData(ScriptData* data)
+ : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
+
+
+SerializedCodeData* SerializedCodeData::FromCachedData(ScriptData* cached_data,
+ String* source) {
+ DisallowHeapAllocation no_gc;
+ SerializedCodeData* scd = new SerializedCodeData(cached_data);
+ SanityCheckResult r = scd->SanityCheck(source);
+ if (r == CHECK_SUCCESS) return scd;
+ cached_data->Reject();
+ source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
+ delete scd;
+ return NULL;
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index da750cb3c6..b76abbcbac 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -5,7 +5,6 @@
#ifndef V8_SERIALIZE_H_
#define V8_SERIALIZE_H_
-#include "src/compiler.h"
#include "src/hashmap.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+class ScriptData;
+
// A TypeCode is used to distinguish different kinds of external reference.
// It is a single bit to make testing for types easy.
enum TypeCode {
@@ -193,6 +194,10 @@ class BackReference {
static BackReference SourceReference() { return BackReference(kSourceValue); }
+ static BackReference GlobalProxyReference() {
+ return BackReference(kGlobalProxyValue);
+ }
+
static BackReference LargeObjectReference(uint32_t index) {
return BackReference(SpaceBits::encode(LO_SPACE) |
ChunkOffsetBits::encode(index));
@@ -209,6 +214,7 @@ class BackReference {
bool is_valid() const { return bitfield_ != kInvalidValue; }
bool is_source() const { return bitfield_ == kSourceValue; }
+ bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
AllocationSpace space() const {
DCHECK(is_valid());
@@ -220,6 +226,12 @@ class BackReference {
return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
}
+ uint32_t large_object_index() const {
+ DCHECK(is_valid());
+ DCHECK(chunk_index() == 0);
+ return ChunkOffsetBits::decode(bitfield_);
+ }
+
uint32_t chunk_index() const {
DCHECK(is_valid());
return ChunkIndexBits::decode(bitfield_);
@@ -235,6 +247,7 @@ class BackReference {
private:
static const uint32_t kInvalidValue = 0xFFFFFFFF;
static const uint32_t kSourceValue = 0xFFFFFFFE;
+ static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
@@ -269,7 +282,7 @@ class BackReferenceMap : public AddressMapBase {
void Add(HeapObject* obj, BackReference b) {
DCHECK(b.is_valid());
- DCHECK_EQ(NULL, LookupEntry(map_, obj, false));
+ DCHECK_NULL(LookupEntry(map_, obj, false));
HashMap::Entry* entry = LookupEntry(map_, obj, true);
SetValue(entry, b.bitfield());
}
@@ -278,6 +291,10 @@ class BackReferenceMap : public AddressMapBase {
Add(string, BackReference::SourceReference());
}
+ void AddGlobalProxy(HeapObject* global_proxy) {
+ Add(global_proxy, BackReference::GlobalProxyReference());
+ }
+
private:
DisallowHeapAllocation no_allocation_;
HashMap* map_;
@@ -297,7 +314,7 @@ class HotObjectsList {
}
HeapObject* Get(int index) {
- DCHECK_NE(NULL, circular_queue_[index]);
+ DCHECK_NOT_NULL(circular_queue_[index]);
return circular_queue_[index];
}
@@ -449,6 +466,9 @@ class SerializerDeserializer: public ObjectVisitor {
// Used as index for the attached reference representing the source object.
static const int kSourceObjectReference = 0;
+ // Used as index for the attached reference representing the global proxy.
+ static const int kGlobalProxyReference = 0;
+
HotObjectsList hot_objects_;
};
@@ -482,12 +502,13 @@ class SerializedData {
protected:
void SetHeaderValue(int offset, uint32_t value) {
- memcpy(reinterpret_cast<uint32_t*>(data_) + offset, &value, sizeof(value));
+ uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
+ memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
}
uint32_t GetHeaderValue(int offset) const {
uint32_t value;
- memcpy(&value, reinterpret_cast<int*>(data_) + offset, sizeof(value));
+ memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
return value;
}
@@ -506,10 +527,10 @@ class Deserializer: public SerializerDeserializer {
template <class Data>
explicit Deserializer(Data* data)
: isolate_(NULL),
- attached_objects_(NULL),
source_(data->Payload()),
external_reference_decoder_(NULL),
- deserialized_large_objects_(0) {
+ deserialized_large_objects_(0),
+ deserializing_user_code_(false) {
DecodeReservation(data->Reservations());
}
@@ -518,23 +539,22 @@ class Deserializer: public SerializerDeserializer {
// Deserialize the snapshot into an empty heap.
void Deserialize(Isolate* isolate);
- enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM };
-
// Deserialize a single object and the objects reachable from it.
- // We may want to abort gracefully even if deserialization fails.
- void DeserializePartial(Isolate* isolate, Object** root,
- OnOOM on_oom = FATAL_ON_OOM);
+ MaybeHandle<Object> DeserializePartial(
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ Handle<FixedArray>* outdated_contexts_out);
+
+ // Deserialize a shared function info. Fail gracefully.
+ MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
void FlushICacheForNewCodeObjects();
- // Serialized user code reference certain objects that are provided in a list
- // By calling this method, we assume that we are deserializing user code.
- void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) {
+ // Pass a vector of externally-provided objects referenced by the snapshot.
+ // The ownership to its backing store is handed over as well.
+ void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
attached_objects_ = attached_objects;
}
- bool deserializing_user_code() { return attached_objects_ != NULL; }
-
private:
virtual void VisitPointers(Object** start, Object** end);
@@ -542,10 +562,18 @@ class Deserializer: public SerializerDeserializer {
UNREACHABLE();
}
+ void Initialize(Isolate* isolate);
+
+ bool deserializing_user_code() { return deserializing_user_code_; }
+
void DecodeReservation(Vector<const SerializedData::Reservation> res);
bool ReserveSpace();
+ void UnalignedCopy(Object** dest, Object** src) {
+ memcpy(dest, src, sizeof(*src));
+ }
+
// Allocation sites are present in the snapshot, and must be linked into
// a list at deserialization time.
void RelinkAllocationSite(AllocationSite* site);
@@ -571,7 +599,7 @@ class Deserializer: public SerializerDeserializer {
Isolate* isolate_;
// Objects from the attached object descriptions in the serialized user code.
- Vector<Handle<Object> >* attached_objects_;
+ Vector<Handle<Object> > attached_objects_;
SnapshotByteSource source_;
// The address of the next object that will be allocated in each space.
@@ -586,6 +614,8 @@ class Deserializer: public SerializerDeserializer {
List<HeapObject*> deserialized_large_objects_;
+ bool deserializing_user_code_;
+
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
@@ -677,7 +707,8 @@ class Serializer : public SerializerDeserializer {
}
}
- void InitializeAllocators();
+ bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
+
// This will return the space for an object.
static AllocationSpace SpaceOfObject(HeapObject* object);
BackReference AllocateLargeObject(int size);
@@ -703,6 +734,8 @@ class Serializer : public SerializerDeserializer {
return max_chunk_size_[space];
}
+ SnapshotByteSink* sink() const { return sink_; }
+
Isolate* isolate_;
SnapshotByteSink* sink_;
@@ -711,8 +744,9 @@ class Serializer : public SerializerDeserializer {
BackReferenceMap back_reference_map_;
RootIndexMap root_index_map_;
- friend class ObjectSerializer;
friend class Deserializer;
+ friend class ObjectSerializer;
+ friend class SnapshotData;
private:
CodeAddressMap* code_address_map_;
@@ -734,11 +768,12 @@ class Serializer : public SerializerDeserializer {
class PartialSerializer : public Serializer {
public:
- PartialSerializer(Isolate* isolate,
- Serializer* startup_snapshot_serializer,
+ PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
SnapshotByteSink* sink)
- : Serializer(isolate, sink),
- startup_serializer_(startup_snapshot_serializer) {
+ : Serializer(isolate, sink),
+ startup_serializer_(startup_snapshot_serializer),
+ outdated_contexts_(0),
+ global_object_(NULL) {
InitializeCodeAddressMap();
}
@@ -762,8 +797,11 @@ class PartialSerializer : public Serializer {
startup_serializer_->isolate()->heap()->fixed_cow_array_map();
}
+ void SerializeOutdatedContextsAsFixedArray();
Serializer* startup_serializer_;
+ List<BackReference> outdated_contexts_;
+ Object* global_object_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
@@ -845,8 +883,6 @@ class CodeSerializer : public Serializer {
WhereToPoint where_to_point);
void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
WhereToPoint where_to_point);
- void SerializeSourceObject(HowToCode how_to_code,
- WhereToPoint where_to_point);
void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
WhereToPoint where_to_point);
int AddCodeStubKey(uint32_t stub_key);
@@ -864,7 +900,7 @@ class CodeSerializer : public Serializer {
class SnapshotData : public SerializedData {
public:
// Used when producing.
- SnapshotData(const SnapshotByteSink& sink, const Serializer& ser);
+ explicit SnapshotData(const Serializer& ser);
// Used when consuming.
explicit SnapshotData(const Vector<const byte> snapshot)
@@ -881,14 +917,16 @@ class SnapshotData : public SerializedData {
private:
bool IsSane();
- // The data header consists of int-sized entries:
+ // The data header consists of uint32_t-sized entries:
// [0] version hash
// [1] number of reservation size entries
// [2] payload length
+ // ... reservations
+ // ... serialized payload
static const int kCheckSumOffset = 0;
- static const int kReservationsOffset = 1;
- static const int kPayloadLengthOffset = 2;
- static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize;
+ static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
+ static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
+ static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
};
@@ -897,14 +935,7 @@ class SerializedCodeData : public SerializedData {
public:
// Used when consuming.
static SerializedCodeData* FromCachedData(ScriptData* cached_data,
- String* source) {
- DisallowHeapAllocation no_gc;
- SerializedCodeData* scd = new SerializedCodeData(cached_data);
- if (scd->IsSane(source)) return scd;
- cached_data->Reject();
- delete scd;
- return NULL;
- }
+ String* source);
// Used when producing.
SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
@@ -919,35 +950,52 @@ class SerializedCodeData : public SerializedData {
Vector<const uint32_t> CodeStubKeys() const;
private:
- explicit SerializedCodeData(ScriptData* data)
- : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
-
- bool IsSane(String* source) const;
-
- static uint32_t SourceHash(String* source) { return source->length(); }
+ explicit SerializedCodeData(ScriptData* data);
+
+ enum SanityCheckResult {
+ CHECK_SUCCESS = 0,
+ MAGIC_NUMBER_MISMATCH = 1,
+ VERSION_MISMATCH = 2,
+ SOURCE_MISMATCH = 3,
+ CPU_FEATURES_MISMATCH = 4,
+ FLAGS_MISMATCH = 5,
+ CHECKSUM_MISMATCH = 6,
+ };
- // The data header consists of int-sized entries:
- // [0] version hash
- // [1] source hash
- // [2] cpu features
- // [3] flag hash
- // [4] number of internalized strings
- // [5] number of code stub keys
- // [6] number of reservation size entries
- // [7] payload length
- // [8] checksum 1
- // [9] checksum 2
- static const int kVersionHashOffset = 0;
- static const int kSourceHashOffset = 1;
- static const int kCpuFeaturesOffset = 2;
- static const int kFlagHashOffset = 3;
- static const int kNumInternalizedStringsOffset = 4;
- static const int kReservationsOffset = 5;
- static const int kNumCodeStubKeysOffset = 6;
- static const int kPayloadLengthOffset = 7;
- static const int kChecksum1Offset = 8;
- static const int kChecksum2Offset = 9;
- static const int kHeaderSize = (kChecksum2Offset + 1) * kIntSize;
+ SanityCheckResult SanityCheck(String* source) const;
+
+ uint32_t SourceHash(String* source) const { return source->length(); }
+
+ static const uint32_t kMagicNumber = 0xC0D1F1ED;
+
+ // The data header consists of uint32_t-sized entries:
+ // [ 0] magic number
+ // [ 1] version hash
+ // [ 2] source hash
+ // [ 3] cpu features
+ // [ 4] flag hash
+ // [ 5] number of internalized strings
+ // [ 6] number of code stub keys
+ // [ 7] number of reservation size entries
+ // [ 8] payload length
+ // [ 9] payload checksum part 1
+ // [10] payload checksum part 2
+ // ... reservations
+ // ... code stub keys
+ // ... serialized payload
+ static const int kMagicNumberOffset = 0;
+ static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
+ static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
+ static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
+ static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
+ static const int kNumInternalizedStringsOffset = kFlagHashOffset + kInt32Size;
+ static const int kNumReservationsOffset =
+ kNumInternalizedStringsOffset + kInt32Size;
+ static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
+ static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
+ static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
+ static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
+ static const int kHeaderSize = kChecksum2Offset + kInt32Size;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index 6dd08f4a5e..d198291907 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -13,6 +13,8 @@
#include "src/arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/simulator-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index dc7f655abc..637bac0f4d 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -8,7 +8,7 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
-#include "src/serialize.h"
+#include "src/full-codegen.h"
#include "src/snapshot.h"
namespace v8 {
@@ -19,60 +19,177 @@ bool Snapshot::HaveASnapshotToStartFrom() {
}
+#ifdef DEBUG
+bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
+ return !Snapshot::ExtractStartupData(snapshot_blob).is_empty() &&
+ !Snapshot::ExtractContextData(snapshot_blob).is_empty();
+}
+#endif // DEBUG
+
+
+bool Snapshot::EmbedsScript() {
+ if (!HaveASnapshotToStartFrom()) return false;
+ const v8::StartupData blob = SnapshotBlob();
+ return ExtractMetadata(&blob).embeds_script();
+}
+
+
+uint32_t Snapshot::SizeOfFirstPage(AllocationSpace space) {
+ DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
+ if (!HaveASnapshotToStartFrom()) {
+ return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space));
+ }
+ uint32_t size;
+ int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size;
+ memcpy(&size, SnapshotBlob().data + offset, kInt32Size);
+ return size;
+}
+
+
bool Snapshot::Initialize(Isolate* isolate) {
if (!HaveASnapshotToStartFrom()) return false;
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData blob = SnapshotBlob();
- SnapshotData snapshot_data(ExtractStartupData(&blob));
+ Vector<const byte> startup_data = ExtractStartupData(&blob);
+ SnapshotData snapshot_data(startup_data);
Deserializer deserializer(&snapshot_data);
bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
- PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
+ int bytes = startup_data.length();
+ PrintF("[Deserializing isolate (%d bytes) took %0.3f ms]\n", bytes, ms);
}
return success;
}
-Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
+MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ Handle<FixedArray>* outdated_contexts_out) {
if (!HaveASnapshotToStartFrom()) return Handle<Context>();
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData blob = SnapshotBlob();
- SnapshotData snapshot_data(ExtractContextData(&blob));
+ Vector<const byte> context_data = ExtractContextData(&blob);
+ SnapshotData snapshot_data(context_data);
Deserializer deserializer(&snapshot_data);
- Object* root;
- deserializer.DeserializePartial(isolate, &root);
- CHECK(root->IsContext());
- return Handle<Context>(Context::cast(root));
+
+ MaybeHandle<Object> maybe_context = deserializer.DeserializePartial(
+ isolate, global_proxy, outdated_contexts_out);
+ Handle<Object> result;
+ if (!maybe_context.ToHandle(&result)) return MaybeHandle<Context>();
+ CHECK(result->IsContext());
+ // If the snapshot does not contain a custom script, we need to update
+ // the global object for exactly one context.
+ CHECK(EmbedsScript() || (*outdated_contexts_out)->length() == 1);
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ int bytes = context_data.length();
+ PrintF("[Deserializing context (%d bytes) took %0.3f ms]\n", bytes, ms);
+ }
+ return Handle<Context>::cast(result);
+}
+
+
+void CalculateFirstPageSizes(bool is_default_snapshot,
+ const SnapshotData& startup_snapshot,
+ const SnapshotData& context_snapshot,
+ uint32_t* sizes_out) {
+ Vector<const SerializedData::Reservation> startup_reservations =
+ startup_snapshot.Reservations();
+ Vector<const SerializedData::Reservation> context_reservations =
+ context_snapshot.Reservations();
+ int startup_index = 0;
+ int context_index = 0;
+ for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
+ bool single_chunk = true;
+ while (!startup_reservations[startup_index].is_last()) {
+ single_chunk = false;
+ startup_index++;
+ }
+ while (!context_reservations[context_index].is_last()) {
+ single_chunk = false;
+ context_index++;
+ }
+
+ uint32_t required = kMaxUInt32;
+ if (single_chunk) {
+ // If both the startup snapshot data and the context snapshot data on
+ // this space fit in a single page, then we consider limiting the size
+ // of the first page. For this, we add the chunk sizes and some extra
+ // allowance. This way we achieve a smaller startup memory footprint.
+ required = (startup_reservations[startup_index].chunk_size() +
+ 2 * context_reservations[context_index].chunk_size()) +
+ Page::kObjectStartOffset;
+ } else {
+ // We expect the vanilla snapshot to only require one page per space.
+ DCHECK(!is_default_snapshot);
+ }
+
+ if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
+ uint32_t max_size =
+ MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
+ sizes_out[space - FIRST_PAGED_SPACE] = Min(required, max_size);
+ } else {
+ DCHECK(single_chunk);
+ }
+ startup_index++;
+ context_index++;
+ }
+
+ DCHECK_EQ(startup_reservations.length(), startup_index);
+ DCHECK_EQ(context_reservations.length(), context_index);
}
v8::StartupData Snapshot::CreateSnapshotBlob(
- const Vector<const byte> startup_data,
- const Vector<const byte> context_data) {
+ const i::StartupSerializer& startup_ser,
+ const i::PartialSerializer& context_ser, Snapshot::Metadata metadata) {
+ SnapshotData startup_snapshot(startup_ser);
+ SnapshotData context_snapshot(context_ser);
+ Vector<const byte> startup_data = startup_snapshot.RawData();
+ Vector<const byte> context_data = context_snapshot.RawData();
+
+ uint32_t first_page_sizes[kNumPagedSpaces];
+
+ CalculateFirstPageSizes(metadata.embeds_script(), startup_snapshot,
+ context_snapshot, first_page_sizes);
+
int startup_length = startup_data.length();
int context_length = context_data.length();
- int context_offset = kIntSize + startup_length;
+ int context_offset = ContextOffset(startup_length);
+
int length = context_offset + context_length;
char* data = new char[length];
- memcpy(data, &startup_length, kIntSize);
- memcpy(data + kIntSize, startup_data.begin(), startup_length);
+ memcpy(data + kMetadataOffset, &metadata.RawValue(), kInt32Size);
+ memcpy(data + kFirstPageSizesOffset, first_page_sizes,
+ kNumPagedSpaces * kInt32Size);
+ memcpy(data + kStartupLengthOffset, &startup_length, kInt32Size);
+ memcpy(data + kStartupDataOffset, startup_data.begin(), startup_length);
memcpy(data + context_offset, context_data.begin(), context_length);
v8::StartupData result = {data, length};
return result;
}
+Snapshot::Metadata Snapshot::ExtractMetadata(const v8::StartupData* data) {
+ uint32_t raw;
+ memcpy(&raw, data->data + kMetadataOffset, kInt32Size);
+ return Metadata(raw);
+}
+
+
Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
DCHECK_LT(kIntSize, data->raw_size);
int startup_length;
- memcpy(&startup_length, data->data, kIntSize);
+ memcpy(&startup_length, data->data + kStartupLengthOffset, kInt32Size);
DCHECK_LT(startup_length, data->raw_size);
const byte* startup_data =
- reinterpret_cast<const byte*>(data->data + kIntSize);
+ reinterpret_cast<const byte*>(data->data + kStartupDataOffset);
return Vector<const byte>(startup_data, startup_length);
}
@@ -80,8 +197,8 @@ Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) {
DCHECK_LT(kIntSize, data->raw_size);
int startup_length;
- memcpy(&startup_length, data->data, kIntSize);
- int context_offset = kIntSize + startup_length;
+ memcpy(&startup_length, data->data + kStartupLengthOffset, kIntSize);
+ int context_offset = ContextOffset(startup_length);
const byte* context_data =
reinterpret_cast<const byte*>(data->data + context_offset);
DCHECK_LT(context_offset, data->raw_size);
diff --git a/deps/v8/src/snapshot-external.cc b/deps/v8/src/snapshot-external.cc
index 2fda571252..a9a5df1cd7 100644
--- a/deps/v8/src/snapshot-external.cc
+++ b/deps/v8/src/snapshot-external.cc
@@ -6,6 +6,7 @@
#include "src/snapshot.h"
+#include "src/base/platform/mutex.h"
#include "src/serialize.h"
#include "src/snapshot-source-sink.h"
#include "src/v8.h" // for V8::Initialize
@@ -19,19 +20,24 @@
namespace v8 {
namespace internal {
+static base::LazyMutex external_startup_data_mutex = LAZY_MUTEX_INITIALIZER;
static v8::StartupData external_startup_blob = {NULL, 0};
void SetSnapshotFromFile(StartupData* snapshot_blob) {
+ base::LockGuard<base::Mutex> lock_guard(
+ external_startup_data_mutex.Pointer());
DCHECK(snapshot_blob);
DCHECK(snapshot_blob->data);
DCHECK(snapshot_blob->raw_size > 0);
DCHECK(!external_startup_blob.data);
- // Validate snapshot blob.
- DCHECK(!Snapshot::ExtractStartupData(snapshot_blob).is_empty());
- DCHECK(!Snapshot::ExtractContextData(snapshot_blob).is_empty());
+ DCHECK(Snapshot::SnapshotIsValid(snapshot_blob));
external_startup_blob = *snapshot_blob;
}
-const v8::StartupData Snapshot::SnapshotBlob() { return external_startup_blob; }
+const v8::StartupData Snapshot::SnapshotBlob() {
+ base::LockGuard<base::Mutex> lock_guard(
+ external_startup_data_mutex.Pointer());
+ return external_startup_blob;
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h
index 25a07cdd97..3135756a32 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot.h
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/isolate.h"
+#include "src/serialize.h"
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
@@ -12,25 +13,70 @@ namespace internal {
class Snapshot : public AllStatic {
public:
+ class Metadata {
+ public:
+ explicit Metadata(uint32_t data = 0) : data_(data) {}
+ bool embeds_script() { return EmbedsScriptBits::decode(data_); }
+ void set_embeds_script(bool v) {
+ data_ = EmbedsScriptBits::update(data_, v);
+ }
+
+ uint32_t& RawValue() { return data_; }
+
+ private:
+ class EmbedsScriptBits : public BitField<bool, 0, 1> {};
+ uint32_t data_;
+ };
+
// Initialize the Isolate from the internal snapshot. Returns false if no
// snapshot could be found.
static bool Initialize(Isolate* isolate);
// Create a new context using the internal partial snapshot.
- static Handle<Context> NewContextFromSnapshot(Isolate* isolate);
+ static MaybeHandle<Context> NewContextFromSnapshot(
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ Handle<FixedArray>* outdated_contexts_out);
static bool HaveASnapshotToStartFrom();
+ static bool EmbedsScript();
+
+ static uint32_t SizeOfFirstPage(AllocationSpace space);
+
// To be implemented by the snapshot source.
static const v8::StartupData SnapshotBlob();
static v8::StartupData CreateSnapshotBlob(
- const Vector<const byte> startup_data,
- const Vector<const byte> context_data);
+ const StartupSerializer& startup_ser,
+ const PartialSerializer& context_ser, Snapshot::Metadata metadata);
+
+#ifdef DEBUG
+ static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
+#endif // DEBUG
+ private:
static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
static Vector<const byte> ExtractContextData(const v8::StartupData* data);
+ static Metadata ExtractMetadata(const v8::StartupData* data);
+
+ // Snapshot blob layout:
+ // [0] metadata
+ // [1 - 6] pre-calculated first page sizes for paged spaces
+ // [7] serialized start up data length
+ // ... serialized start up data
+ // ... serialized context data
+
+ static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+
+ static const int kMetadataOffset = 0;
+ static const int kFirstPageSizesOffset = kMetadataOffset + kInt32Size;
+ static const int kStartupLengthOffset =
+ kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
+ static const int kStartupDataOffset = kStartupLengthOffset + kInt32Size;
+
+ static int ContextOffset(int startup_length) {
+ return kStartupDataOffset + startup_length;
+ }
- private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
diff --git a/deps/v8/src/startup-data-util.cc b/deps/v8/src/startup-data-util.cc
new file mode 100644
index 0000000000..2000e3ccca
--- /dev/null
+++ b/deps/v8/src/startup-data-util.cc
@@ -0,0 +1,91 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/startup-data-util.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/logging.h"
+
+
+namespace v8 {
+
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+
+StartupDataHandler::StartupDataHandler(const char* exec_path,
+ const char* natives_blob,
+ const char* snapshot_blob) {
+ // If we have (at least one) explicitly given blob, use those.
+ // If not, use the default blob locations next to the d8 binary.
+ if (natives_blob || snapshot_blob) {
+ LoadFromFiles(natives_blob, snapshot_blob);
+ } else {
+ char* natives;
+ char* snapshot;
+ LoadFromFiles(RelativePath(&natives, exec_path, "natives_blob.bin"),
+ RelativePath(&snapshot, exec_path, "snapshot_blob.bin"));
+
+ free(natives);
+ free(snapshot);
+ }
+}
+
+
+StartupDataHandler::~StartupDataHandler() {
+ delete[] natives_.data;
+ delete[] snapshot_.data;
+}
+
+
+char* StartupDataHandler::RelativePath(char** buffer, const char* exec_path,
+ const char* name) {
+ DCHECK(exec_path);
+ const char* last_slash = strrchr(exec_path, '/');
+ if (last_slash) {
+ int after_slash = last_slash - exec_path + 1;
+ int name_length = static_cast<int>(strlen(name));
+ *buffer = reinterpret_cast<char*>(calloc(after_slash + name_length + 1, 1));
+ strncpy(*buffer, exec_path, after_slash);
+ strncat(*buffer, name, name_length);
+ } else {
+ *buffer = strdup(name);
+ }
+ return *buffer;
+}
+
+
+void StartupDataHandler::LoadFromFiles(const char* natives_blob,
+ const char* snapshot_blob) {
+ Load(natives_blob, &natives_, v8::V8::SetNativesDataBlob);
+ Load(snapshot_blob, &snapshot_, v8::V8::SetSnapshotDataBlob);
+}
+
+
+void StartupDataHandler::Load(const char* blob_file,
+ v8::StartupData* startup_data,
+ void (*setter_fn)(v8::StartupData*)) {
+ startup_data->data = NULL;
+ startup_data->raw_size = 0;
+
+ if (!blob_file) return;
+
+ FILE* file = fopen(blob_file, "rb");
+ if (!file) return;
+
+ fseek(file, 0, SEEK_END);
+ startup_data->raw_size = ftell(file);
+ rewind(file);
+
+ startup_data->data = new char[startup_data->raw_size];
+ int read_size = static_cast<int>(fread(const_cast<char*>(startup_data->data),
+ 1, startup_data->raw_size, file));
+ fclose(file);
+
+ if (startup_data->raw_size == read_size) (*setter_fn)(startup_data);
+}
+
+#endif // V8_USE_EXTERNAL_STARTUP_DATA
+
+} // namespace v8
diff --git a/deps/v8/src/startup-data-util.h b/deps/v8/src/startup-data-util.h
new file mode 100644
index 0000000000..79b4171343
--- /dev/null
+++ b/deps/v8/src/startup-data-util.h
@@ -0,0 +1,51 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef V8_STARTUP_DATA_UTIL_H_
+#define V8_STARTUP_DATA_UTIL_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+// Helper class to load the startup data files from disk.
+//
+// This is meant as a convenience for stand-alone binaries like d8, cctest,
+// unittest. A V8 embedder would likely either handle startup data on their
+// own or just disable the feature if they don't want to handle it at all,
+// while tools like cctest need to work in either configuration. Hence this is
+// not meant for inclusion in the general v8 library.
+class StartupDataHandler {
+ public:
+ // Load startup data, and call the v8::V8::Set*DataBlob API functions.
+ //
+ // natives_blob and snapshot_blob will be loaded relative to exec_path,
+ // which would usually be the equivalent of argv[0].
+ StartupDataHandler(const char* exec_path, const char* natives_blob,
+ const char* snapshot_blob);
+ ~StartupDataHandler();
+
+ private:
+ static char* RelativePath(char** buffer, const char* exec_path,
+ const char* name);
+
+ void LoadFromFiles(const char* natives_blob, const char* snapshot_blob);
+
+ void Load(const char* blob_file, v8::StartupData* startup_data,
+ void (*setter_fn)(v8::StartupData*));
+
+ v8::StartupData natives_;
+ v8::StartupData snapshot_;
+
+ // Disallow copy & assign.
+ StartupDataHandler(const StartupDataHandler& other);
+ void operator=(const StartupDataHandler& other);
+};
+#endif // V8_USE_EXTERNAL_STARTUP_DATA
+
+} // namespace v8
+
+#endif // V8_STARTUP_DATA_UTIL_H_
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 21f66a6130..d53cdc092e 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -335,7 +335,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- if (details.type() == FIELD) {
+ if (details.type() == DATA) {
Object* key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
int len = 3;
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.cc b/deps/v8/src/third_party/fdlibm/fdlibm.cc
index cc5dbc2f97..b8bc243f4d 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.cc
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.cc
@@ -126,7 +126,7 @@ static const double PIo2[] = {
};
-int __kernel_rem_pio2(double* x, double* y, int e0, int nx) {
+INLINE(int __kernel_rem_pio2(double* x, double* y, int e0, int nx)) {
static const int32_t jk = 3;
double fw;
int32_t jx = nx - 1;
@@ -135,12 +135,12 @@ int __kernel_rem_pio2(double* x, double* y, int e0, int nx) {
int32_t q0 = e0 - 24 * (jv + 1);
int32_t m = jx + jk;
- double f[10];
+ double f[20];
for (int i = 0, j = jv - jx; i <= m; i++, j++) {
f[i] = (j < 0) ? zero : static_cast<double>(two_over_pi[j]);
}
- double q[10];
+ double q[20];
for (int i = 0; i <= jk; i++) {
fw = 0.0;
for (int j = 0; j <= jx; j++) fw += x[j] * f[jx + i - j];
@@ -151,7 +151,7 @@ int __kernel_rem_pio2(double* x, double* y, int e0, int nx) {
recompute:
- int32_t iq[10];
+ int32_t iq[20];
double z = q[jz];
for (int i = 0, j = jz; j > 0; i++, j--) {
fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
@@ -242,7 +242,7 @@ recompute:
fw *= twon24;
}
- double fq[10];
+ double fq[20];
for (int i = jz; i >= 0; i--) {
fw = 0.0;
for (int k = 0; k <= jk && k <= jz - i; k++) fw += PIo2[k] * q[i + k];
@@ -264,7 +264,7 @@ int rempio2(double x, double* y) {
int32_t ix = hx & 0x7fffffff;
if (ix >= 0x7ff00000) {
- *y = base::OS::nan_value();
+ *y = std::numeric_limits<double>::quiet_NaN();
return 0;
}
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.js b/deps/v8/src/third_party/fdlibm/fdlibm.js
index ceeacc59bb..8804469905 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.js
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.js
@@ -20,10 +20,13 @@
// and exposed through kMath as typed array. We assume the compiler to convert
// from decimal to binary accurately enough to produce the intended values.
// kMath is initialized to a Float64Array during genesis and not writable.
+// rempio2result is used as a container for return values of %RemPiO2. It is
+// initialized to a two-element Float64Array during genesis.
"use strict";
var kMath;
+var rempio2result;
const INVPIO2 = kMath[0];
const PIO2_1 = kMath[1];
@@ -38,6 +41,7 @@ const PIO4LO = kMath[33];
// Compute k and r such that x - k*pi/2 = r where |r| < pi/4. For
// precision, r is returned as two values y0 and y1 such that r = y0 + y1
// to more than double precision.
+
macro REMPIO2(X)
var n, y0, y1;
var hx = %_DoubleHi(X);
@@ -105,10 +109,9 @@ macro REMPIO2(X)
}
} else {
// Need to do full Payne-Hanek reduction here.
- var r = %RemPiO2(X);
- n = r[0];
- y0 = r[1];
- y1 = r[2];
+ n = %RemPiO2(X, rempio2result);
+ y0 = rempio2result[0];
+ y1 = rempio2result[1];
}
endmacro
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index de8ede90ca..0f46b118cd 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -40,6 +40,7 @@ namespace internal {
T(COLON, ":", 0) \
T(SEMICOLON, ";", 0) \
T(PERIOD, ".", 0) \
+ T(ELLIPSIS, "...", 0) \
T(CONDITIONAL, "?", 3) \
T(INC, "++", 0) \
T(DEC, "--", 0) \
@@ -192,7 +193,7 @@ class Token {
return token_type[tok] == 'K';
}
- static bool IsIdentifier(Value tok, StrictMode strict_mode,
+ static bool IsIdentifier(Value tok, LanguageMode language_mode,
bool is_generator) {
switch (tok) {
case IDENTIFIER:
@@ -200,9 +201,9 @@ class Token {
case FUTURE_STRICT_RESERVED_WORD:
case LET:
case STATIC:
- return strict_mode == SLOPPY;
+ return is_sloppy(language_mode);
case YIELD:
- return !is_generator && strict_mode == SLOPPY;
+ return !is_generator && is_sloppy(language_mode);
default:
return false;
}
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index c8c63d7ba2..43fc90b1b5 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -97,7 +97,7 @@ Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
bool is_special_transition = flag == SPECIAL_TRANSITION;
DCHECK_EQ(is_special_transition, IsSpecialTransition(*name));
PropertyDetails details = is_special_transition
- ? PropertyDetails(NONE, FIELD, 0)
+ ? PropertyDetails(NONE, DATA, 0)
: GetTargetDetails(*name, *target);
int insertion_index = kNotFound;
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 612004ebee..9b64082d35 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -30,11 +30,6 @@ Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
}
-Handle<Object> TypeFeedbackVector::GenericSentinel(Isolate* isolate) {
- return isolate->factory()->generic_symbol();
-}
-
-
Handle<Object> TypeFeedbackVector::MonomorphicArraySentinel(
Isolate* isolate, ElementsKind elements_kind) {
return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index c51d9877f1..b0be315b2b 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -132,22 +132,9 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
// This logic is copied from
// StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget.
-// TODO(mvstanton): with weak handling of all vector ics, this logic should
-// actually be completely eliminated and we no longer need to clear the
-// vector ICs.
-static bool ClearLogic(Heap* heap, int ic_age, Code::Kind kind,
- InlineCacheState state) {
- if (FLAG_cleanup_code_caches_at_gc &&
- (kind == Code::CALL_IC || heap->flush_monomorphic_ics() ||
- // TODO(mvstanton): is this ic_age granular enough? it comes from
- // the SharedFunctionInfo which may change on a different schedule
- // than ic targets.
- // ic_age != heap->global_ic_age() ||
- // is_invalidated_weak_stub ||
- heap->isolate()->serializer_enabled())) {
- return true;
- }
- return false;
+static bool ClearLogic(Heap* heap, int ic_age) {
+ return FLAG_cleanup_code_caches_at_gc &&
+ heap->isolate()->serializer_enabled();
}
@@ -171,16 +158,22 @@ void TypeFeedbackVector::ClearSlots(SharedFunctionInfo* shared) {
}
}
}
+}
- slots = ICSlots();
- if (slots == 0) return;
- // Now clear vector-based ICs.
- // Try and pass the containing code (the "host").
- Heap* heap = isolate->heap();
- Code* host = shared->code();
+void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
+ bool force_clear) {
+ Heap* heap = GetIsolate()->heap();
+
// I'm not sure yet if this ic age is the correct one.
int ic_age = shared->ic_age();
+
+ if (!force_clear && !ClearLogic(heap, ic_age)) return;
+
+ int slots = ICSlots();
+ Code* host = shared->code();
+ Object* uninitialized_sentinel =
+ TypeFeedbackVector::RawUninitializedSentinel(heap);
for (int i = 0; i < slots; i++) {
FeedbackVectorICSlot slot(i);
Object* obj = Get(slot);
@@ -188,19 +181,13 @@ void TypeFeedbackVector::ClearSlots(SharedFunctionInfo* shared) {
Code::Kind kind = GetKind(slot);
if (kind == Code::CALL_IC) {
CallICNexus nexus(this, slot);
- if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
- nexus.Clear(host);
- }
+ nexus.Clear(host);
} else if (kind == Code::LOAD_IC) {
LoadICNexus nexus(this, slot);
- if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
- nexus.Clear(host);
- }
+ nexus.Clear(host);
} else if (kind == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus nexus(this, slot);
- if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
- nexus.Clear(host);
- }
+ nexus.Clear(host);
}
}
}
@@ -220,14 +207,13 @@ Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
}
-void FeedbackNexus::InstallHandlers(int start_index, TypeHandleList* types,
+void FeedbackNexus::InstallHandlers(int start_index, MapHandleList* maps,
CodeHandleList* handlers) {
Isolate* isolate = GetIsolate();
Handle<FixedArray> array = handle(FixedArray::cast(GetFeedback()), isolate);
- int receiver_count = types->length();
+ int receiver_count = maps->length();
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate);
+ Handle<Map> map = maps->at(current);
Handle<WeakCell> cell = Map::WeakCellForMap(map);
array->set(start_index + (current * 2), *cell);
array->set(start_index + (current * 2 + 1), *handlers->at(current));
@@ -264,8 +250,8 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
return UNINITIALIZED;
} else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
return PREMONOMORPHIC;
- } else if (feedback == *vector()->GenericSentinel(isolate)) {
- return GENERIC;
+ } else if (feedback == *vector()->MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
} else if (feedback->IsFixedArray()) {
// Determine state purely by our structure, don't check if the maps are
// cleared.
@@ -285,7 +271,7 @@ InlineCacheState CallICNexus::StateFromFeedback() const {
if (feedback == *vector()->MegamorphicSentinel(isolate)) {
return GENERIC;
- } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
+ } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
return MONOMORPHIC;
}
@@ -319,12 +305,13 @@ void CallICNexus::ConfigureUninitialized() {
void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
- SetFeedback(*function);
+ Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
+ SetFeedback(*new_cell);
}
-void KeyedLoadICNexus::ConfigureGeneric() {
- SetFeedback(*vector()->GenericSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
+void KeyedLoadICNexus::ConfigureMegamorphic() {
+ SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
}
@@ -345,10 +332,9 @@ void KeyedLoadICNexus::ConfigurePremonomorphic() {
}
-void LoadICNexus::ConfigureMonomorphic(Handle<HeapType> type,
+void LoadICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
Handle<Code> handler) {
Handle<FixedArray> array = EnsureArrayOfSize(2);
- Handle<Map> receiver_map = IC::TypeToMap(*type, GetIsolate());
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
array->set(0, *cell);
array->set(1, *handler);
@@ -356,10 +342,9 @@ void LoadICNexus::ConfigureMonomorphic(Handle<HeapType> type,
void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
- Handle<HeapType> type,
+ Handle<Map> receiver_map,
Handle<Code> handler) {
Handle<FixedArray> array = EnsureArrayOfSize(3);
- Handle<Map> receiver_map = IC::TypeToMap(*type, GetIsolate());
if (name.is_null()) {
array->set(0, Smi::FromInt(0));
} else {
@@ -371,25 +356,25 @@ void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
}
-void LoadICNexus::ConfigurePolymorphic(TypeHandleList* types,
+void LoadICNexus::ConfigurePolymorphic(MapHandleList* maps,
CodeHandleList* handlers) {
- int receiver_count = types->length();
+ int receiver_count = maps->length();
EnsureArrayOfSize(receiver_count * 2);
- InstallHandlers(0, types, handlers);
+ InstallHandlers(0, maps, handlers);
}
void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
- TypeHandleList* types,
+ MapHandleList* maps,
CodeHandleList* handlers) {
- int receiver_count = types->length();
+ int receiver_count = maps->length();
Handle<FixedArray> array = EnsureArrayOfSize(1 + receiver_count * 2);
if (name.is_null()) {
array->set(0, Smi::FromInt(0));
} else {
array->set(0, *name);
}
- InstallHandlers(1, types, handlers);
+ InstallHandlers(1, maps, handlers);
}
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index 864f336f90..b7abad51f1 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -120,6 +120,7 @@ class TypeFeedbackVector : public FixedArray {
// Conversion from a slot or ic slot to an integer index to the underlying
// array.
int GetIndex(FeedbackVectorSlot slot) const {
+ DCHECK(slot.ToInt() < first_ic_slot_index());
return kReservedIndexCount + ic_metadata_length() + slot.ToInt();
}
@@ -165,6 +166,12 @@ class TypeFeedbackVector : public FixedArray {
// Clears the vector slots and the vector ic slots.
void ClearSlots(SharedFunctionInfo* shared);
+ void ClearICSlots(SharedFunctionInfo* shared) {
+ ClearICSlotsImpl(shared, true);
+ }
+ void ClearICSlotsAtGCTime(SharedFunctionInfo* shared) {
+ ClearICSlotsImpl(shared, false);
+ }
// The object that indicates an uninitialized cache.
static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
@@ -175,9 +182,6 @@ class TypeFeedbackVector : public FixedArray {
// The object that indicates a premonomorphic state.
static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
- // The object that indicates a generic state.
- static inline Handle<Object> GenericSentinel(Isolate* isolate);
-
// The object that indicates a monomorphic state of Array with
// ElementsKind
static inline Handle<Object> MonomorphicArraySentinel(
@@ -203,6 +207,8 @@ class TypeFeedbackVector : public FixedArray {
typedef BitSetComputer<VectorICKind, kVectorICKindBits, kSmiValueSize,
uint32_t> VectorICComputer;
+ void ClearICSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
};
@@ -256,7 +262,7 @@ class FeedbackNexus {
}
Handle<FixedArray> EnsureArrayOfSize(int length);
- void InstallHandlers(int start_index, TypeHandleList* types,
+ void InstallHandlers(int start_index, MapHandleList* maps,
CodeHandleList* handlers);
int ExtractMaps(int start_index, MapHandleList* maps) const;
MaybeHandle<Code> FindHandlerForMap(int start_index, Handle<Map> map) const;
@@ -323,9 +329,9 @@ class LoadICNexus : public FeedbackNexus {
void ConfigureMegamorphic();
void ConfigurePremonomorphic();
- void ConfigureMonomorphic(Handle<HeapType> type, Handle<Code> handler);
+ void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Code> handler);
- void ConfigurePolymorphic(TypeHandleList* types, CodeHandleList* handlers);
+ void ConfigurePolymorphic(MapHandleList* maps, CodeHandleList* handlers);
InlineCacheState StateFromFeedback() const OVERRIDE;
int ExtractMaps(MapHandleList* maps) const OVERRIDE;
@@ -348,13 +354,13 @@ class KeyedLoadICNexus : public FeedbackNexus {
void Clear(Code* host);
- void ConfigureGeneric();
+ void ConfigureMegamorphic();
void ConfigurePremonomorphic();
// name can be a null handle for element loads.
- void ConfigureMonomorphic(Handle<Name> name, Handle<HeapType> type,
+ void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
Handle<Code> handler);
// name can be null.
- void ConfigurePolymorphic(Handle<Name> name, TypeHandleList* types,
+ void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
CodeHandleList* handlers);
InlineCacheState StateFromFeedback() const OVERRIDE;
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 611373f865..01943414a7 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -16,16 +16,16 @@ namespace internal {
TypeFeedbackOracle::TypeFeedbackOracle(
- Handle<Code> code, Handle<TypeFeedbackVector> feedback_vector,
- Handle<Context> native_context, Zone* zone)
- : native_context_(native_context), zone_(zone) {
+ Isolate* isolate, Zone* zone, Handle<Code> code,
+ Handle<TypeFeedbackVector> feedback_vector, Handle<Context> native_context)
+ : native_context_(native_context), isolate_(isolate), zone_(zone) {
BuildDictionary(code);
DCHECK(dictionary_->IsDictionary());
// We make a copy of the feedback vector because a GC could clear
// the type feedback info contained therein.
// TODO(mvstanton): revisit the decision to copy when we weakly
// traverse the feedback vector at GC time.
- feedback_vector_ = TypeFeedbackVector::Copy(isolate(), feedback_vector);
+ feedback_vector_ = TypeFeedbackVector::Copy(isolate, feedback_vector);
}
@@ -62,12 +62,23 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorICSlot slot) {
DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
+ Handle<Object> undefined =
+ Handle<Object>::cast(isolate()->factory()->undefined_value());
Object* obj = feedback_vector_->Get(slot);
+
+ // Vector-based ICs do not embed direct pointers to maps, functions.
+ // Instead a WeakCell is always used.
+ if (obj->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(obj);
+ if (cell->cleared()) return undefined;
+ obj = cell->value();
+ }
+
if (!obj->IsJSFunction() ||
!CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
return Handle<Object>(obj, isolate());
}
- return Handle<Object>::cast(isolate()->factory()->undefined_value());
+ return undefined;
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 60f156f5f3..65af76865e 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -8,7 +8,7 @@
#include "src/allocation.h"
#include "src/globals.h"
#include "src/types.h"
-#include "src/zone-inl.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -19,9 +19,9 @@ class SmallMapList;
class TypeFeedbackOracle: public ZoneObject {
public:
- TypeFeedbackOracle(Handle<Code> code,
+ TypeFeedbackOracle(Isolate* isolate, Zone* zone, Handle<Code> code,
Handle<TypeFeedbackVector> feedback_vector,
- Handle<Context> native_context, Zone* zone);
+ Handle<Context> native_context);
bool LoadIsUninitialized(TypeFeedbackId id);
bool LoadIsUninitialized(FeedbackVectorICSlot slot);
@@ -101,7 +101,7 @@ class TypeFeedbackOracle: public ZoneObject {
Type* CountType(TypeFeedbackId id);
Zone* zone() const { return zone_; }
- Isolate* isolate() const { return zone_->isolate(); }
+ Isolate* isolate() const { return isolate_; }
private:
void CollectReceiverTypes(TypeFeedbackId id,
@@ -137,6 +137,7 @@ class TypeFeedbackOracle: public ZoneObject {
private:
Handle<Context> native_context_;
+ Isolate* isolate_;
Zone* zone_;
Handle<UnseededNumberDictionary> dictionary_;
Handle<TypeFeedbackVector> feedback_vector_;
diff --git a/deps/v8/src/types-inl.h b/deps/v8/src/types-inl.h
index 2e7f8a36fa..762a11df30 100644
--- a/deps/v8/src/types-inl.h
+++ b/deps/v8/src/types-inl.h
@@ -16,6 +16,19 @@ namespace internal {
// -----------------------------------------------------------------------------
// TypeImpl
+template <class Config>
+typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::SignedSmall() {
+ return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
+}
+
+
+template <class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::UnsignedSmall() {
+ return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
+}
+
+
template<class Config>
TypeImpl<Config>* TypeImpl<Config>::cast(typename Config::Base* object) {
TypeImpl* t = static_cast<TypeImpl*>(object);
@@ -83,7 +96,18 @@ bool ZoneTypeConfig::is_bitset(Type* type) {
// static
bool ZoneTypeConfig::is_struct(Type* type, int tag) {
- return !is_bitset(type) && struct_tag(as_struct(type)) == tag;
+ DCHECK(tag != kRangeStructTag);
+ if (is_bitset(type)) return false;
+ int type_tag = struct_tag(as_struct(type));
+ return type_tag == tag;
+}
+
+
+// static
+bool ZoneTypeConfig::is_range(Type* type) {
+ if (is_bitset(type)) return false;
+ int type_tag = struct_tag(as_struct(type));
+ return type_tag == kRangeStructTag;
}
@@ -108,6 +132,13 @@ ZoneTypeConfig::Struct* ZoneTypeConfig::as_struct(Type* type) {
// static
+ZoneTypeConfig::Range* ZoneTypeConfig::as_range(Type* type) {
+ DCHECK(!is_bitset(type));
+ return reinterpret_cast<Range*>(type);
+}
+
+
+// static
i::Handle<i::Map> ZoneTypeConfig::as_class(Type* type) {
UNREACHABLE();
return i::Handle<i::Map>();
@@ -134,6 +165,12 @@ ZoneTypeConfig::Type* ZoneTypeConfig::from_struct(Struct* structure) {
// static
+ZoneTypeConfig::Type* ZoneTypeConfig::from_range(Range* range) {
+ return reinterpret_cast<Type*>(range);
+}
+
+
+// static
ZoneTypeConfig::Type* ZoneTypeConfig::from_class(
i::Handle<i::Map> map, Zone* zone) {
return from_bitset(0);
@@ -143,6 +180,7 @@ ZoneTypeConfig::Type* ZoneTypeConfig::from_class(
// static
ZoneTypeConfig::Struct* ZoneTypeConfig::struct_create(
int tag, int length, Zone* zone) {
+ DCHECK(tag != kRangeStructTag);
Struct* structure = reinterpret_cast<Struct*>(
zone->New(sizeof(void*) * (length + 2))); // NOLINT
structure[0] = reinterpret_cast<void*>(tag);
@@ -201,6 +239,45 @@ void ZoneTypeConfig::struct_set_value(
}
+// static
+ZoneTypeConfig::Range* ZoneTypeConfig::range_create(Zone* zone) {
+ Range* range = reinterpret_cast<Range*>(zone->New(sizeof(Range))); // NOLINT
+ range->tag = reinterpret_cast<void*>(kRangeStructTag);
+ range->bitset = 0;
+ range->limits[0] = 1;
+ range->limits[1] = 0;
+ return range;
+}
+
+
+// static
+int ZoneTypeConfig::range_get_bitset(ZoneTypeConfig::Range* range) {
+ return range->bitset;
+}
+
+
+// static
+void ZoneTypeConfig::range_set_bitset(ZoneTypeConfig::Range* range, int value) {
+ range->bitset = value;
+}
+
+
+// static
+double ZoneTypeConfig::range_get_double(ZoneTypeConfig::Range* range,
+ int index) {
+ DCHECK(index >= 0 && index < 2);
+ return range->limits[index];
+}
+
+
+// static
+void ZoneTypeConfig::range_set_double(ZoneTypeConfig::Range* range, int index,
+ double value, Zone*) {
+ DCHECK(index >= 0 && index < 2);
+ range->limits[index] = value;
+}
+
+
// -----------------------------------------------------------------------------
// HeapTypeConfig
@@ -239,11 +316,18 @@ bool HeapTypeConfig::is_class(Type* type) {
// static
bool HeapTypeConfig::is_struct(Type* type, int tag) {
+ DCHECK(tag != kRangeStructTag);
return type->IsFixedArray() && struct_tag(as_struct(type)) == tag;
}
// static
+bool HeapTypeConfig::is_range(Type* type) {
+ return type->IsFixedArray() && struct_tag(as_struct(type)) == kRangeStructTag;
+}
+
+
+// static
HeapTypeConfig::Type::bitset HeapTypeConfig::as_bitset(Type* type) {
// TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type));
@@ -263,6 +347,12 @@ i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::as_struct(Type* type) {
// static
+i::Handle<HeapTypeConfig::Range> HeapTypeConfig::as_range(Type* type) {
+ return i::handle(Range::cast(type));
+}
+
+
+// static
HeapTypeConfig::Type* HeapTypeConfig::from_bitset(Type::bitset bitset) {
// TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset));
@@ -291,6 +381,13 @@ i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_struct(
// static
+i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_range(
+ i::Handle<Range> range) {
+ return i::Handle<Type>::cast(i::Handle<Object>::cast(range));
+}
+
+
+// static
i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::struct_create(
int tag, int length, Isolate* isolate) {
i::Handle<Struct> structure = isolate->factory()->NewFixedArray(length + 1);
@@ -348,6 +445,46 @@ void HeapTypeConfig::struct_set_value(
structure->set(i + 1, *x);
}
+
+// static
+i::Handle<HeapTypeConfig::Range> HeapTypeConfig::range_create(
+ Isolate* isolate) {
+ i::Handle<Range> range = isolate->factory()->NewFixedArray(4);
+ range->set(0, i::Smi::FromInt(kRangeStructTag));
+ return range;
+}
+
+
+// static
+int HeapTypeConfig::range_get_bitset(i::Handle<HeapTypeConfig::Range> range) {
+ Type* v = static_cast<Type*>(range->get(1));
+ return as_bitset(v);
+}
+
+
+// static
+void HeapTypeConfig::range_set_bitset(i::Handle<HeapTypeConfig::Range> range,
+ int value) {
+ range->set(1, from_bitset(value));
+}
+
+
+// static
+double HeapTypeConfig::range_get_double(i::Handle<HeapTypeConfig::Range> range,
+ int index) {
+ DCHECK(index >= 0 && index < 2);
+ return range->get(index + 2)->Number();
+}
+
+
+// static
+void HeapTypeConfig::range_set_double(i::Handle<HeapTypeConfig::Range> range,
+ int index, double value,
+ Isolate* isolate) {
+ DCHECK(index >= 0 && index < 2);
+ i::Handle<Object> number = isolate->factory()->NewNumber(value);
+ range->set(index + 2, *number);
+}
} } // namespace v8::internal
#endif // V8_TYPES_INL_H_
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index c4f1bae5fb..37386cd82f 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -26,19 +26,27 @@ typename TypeImpl<Config>::Limits TypeImpl<Config>::Intersect(
Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
Limits result(lhs);
- if (lhs.min->Number() < rhs.min->Number()) result.min = rhs.min;
- if (lhs.max->Number() > rhs.max->Number()) result.max = rhs.max;
+ if (lhs.min < rhs.min) result.min = rhs.min;
+ if (lhs.max > rhs.max) result.max = rhs.max;
return result;
}
-template<class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::Union(
- Limits lhs, Limits rhs) {
+template <class Config>
+bool TypeImpl<Config>::IsEmpty(Limits lim) {
+ return lim.min > lim.max;
+}
+
+
+template <class Config>
+typename TypeImpl<Config>::Limits TypeImpl<Config>::Union(Limits lhs,
+ Limits rhs) {
DisallowHeapAllocation no_allocation;
+ if (IsEmpty(lhs)) return rhs;
+ if (IsEmpty(rhs)) return lhs;
Limits result(lhs);
- if (lhs.min->Number() > rhs.min->Number()) result.min = rhs.min;
- if (lhs.max->Number() < rhs.max->Number()) result.max = rhs.max;
+ if (lhs.min > rhs.min) result.min = rhs.min;
+ if (lhs.max < rhs.max) result.max = rhs.max;
return result;
}
@@ -49,7 +57,7 @@ bool TypeImpl<Config>::Overlap(
typename TypeImpl<Config>::RangeType* rhs) {
DisallowHeapAllocation no_allocation;
typename TypeImpl<Config>::Limits lim = Intersect(Limits(lhs), Limits(rhs));
- return lim.min->Number() <= lim.max->Number();
+ return lim.min <= lim.max;
}
@@ -58,8 +66,17 @@ bool TypeImpl<Config>::Contains(
typename TypeImpl<Config>::RangeType* lhs,
typename TypeImpl<Config>::RangeType* rhs) {
DisallowHeapAllocation no_allocation;
- return lhs->Min()->Number() <= rhs->Min()->Number()
- && rhs->Max()->Number() <= lhs->Max()->Number();
+ return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
+}
+
+
+template <class Config>
+bool TypeImpl<Config>::Contains(typename TypeImpl<Config>::RangeType* lhs,
+ typename TypeImpl<Config>::ConstantType* rhs) {
+ DisallowHeapAllocation no_allocation;
+ return IsInteger(*rhs->Value()) &&
+ lhs->Min() <= rhs->Value()->Number() &&
+ rhs->Value()->Number() <= lhs->Max();
}
@@ -67,9 +84,8 @@ template<class Config>
bool TypeImpl<Config>::Contains(
typename TypeImpl<Config>::RangeType* range, i::Object* val) {
DisallowHeapAllocation no_allocation;
- return IsInteger(val)
- && range->Min()->Number() <= val->Number()
- && val->Number() <= range->Max()->Number();
+ return IsInteger(val) &&
+ range->Min() <= val->Number() && val->Number() <= range->Max();
}
@@ -78,7 +94,7 @@ bool TypeImpl<Config>::Contains(
template<class Config>
double TypeImpl<Config>::Min() {
- DCHECK(this->Is(Number()));
+ DCHECK(this->SemanticIs(Number()));
if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
if (this->IsUnion()) {
double min = +V8_INFINITY;
@@ -87,7 +103,7 @@ double TypeImpl<Config>::Min() {
}
return min;
}
- if (this->IsRange()) return this->AsRange()->Min()->Number();
+ if (this->IsRange()) return this->AsRange()->Min();
if (this->IsConstant()) return this->AsConstant()->Value()->Number();
UNREACHABLE();
return 0;
@@ -96,7 +112,7 @@ double TypeImpl<Config>::Min() {
template<class Config>
double TypeImpl<Config>::Max() {
- DCHECK(this->Is(Number()));
+ DCHECK(this->SemanticIs(Number()));
if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
if (this->IsUnion()) {
double max = -V8_INFINITY;
@@ -105,7 +121,7 @@ double TypeImpl<Config>::Max() {
}
return max;
}
- if (this->IsRange()) return this->AsRange()->Max()->Number();
+ if (this->IsRange()) return this->AsRange()->Max();
if (this->IsConstant()) return this->AsConstant()->Value()->Number();
UNREACHABLE();
return 0;
@@ -121,14 +137,19 @@ template<class Config>
typename TypeImpl<Config>::bitset
TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
DisallowHeapAllocation no_allocation;
+ // Fast case.
if (type->IsBitset()) {
return type->AsBitset();
} else if (type->IsUnion()) {
SLOW_DCHECK(type->AsUnion()->Wellformed());
- return type->AsUnion()->Get(0)->BitsetGlb(); // Shortcut.
- // (The remaining BitsetGlb's are None anyway).
+ return type->AsUnion()->Get(0)->BitsetGlb() |
+ SEMANTIC(type->AsUnion()->Get(1)->BitsetGlb()); // Shortcut.
+ } else if (type->IsRange()) {
+ bitset glb = SEMANTIC(
+ BitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max()));
+ return glb | REPRESENTATION(type->BitsetLub());
} else {
- return kNone;
+ return type->Representation();
}
}
@@ -140,9 +161,12 @@ TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
DisallowHeapAllocation no_allocation;
if (type->IsBitset()) return type->AsBitset();
if (type->IsUnion()) {
- int bitset = kNone;
+ // Take the representation from the first element, which is always
+ // a bitset.
+ int bitset = type->AsUnion()->Get(0)->BitsetLub();
for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
- bitset |= type->AsUnion()->Get(i)->BitsetLub();
+ // Other elements only contribute their semantic part.
+ bitset |= SEMANTIC(type->AsUnion()->Get(i)->BitsetLub());
}
return bitset;
}
@@ -152,7 +176,7 @@ TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
type->AsClass()->Bound(NULL)->AsBitset();
}
if (type->IsConstant()) return type->AsConstant()->Bound()->AsBitset();
- if (type->IsRange()) return type->AsRange()->BitsetLub();
+ if (type->IsRange()) return type->AsRange()->Bound();
if (type->IsContext()) return kInternal & kTaggedPointer;
if (type->IsArray()) return kArray;
if (type->IsFunction()) return kOtherObject; // TODO(rossberg): kFunction
@@ -251,6 +275,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case FIXED_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case FOREIGN_TYPE:
+ case SCRIPT_TYPE:
case CODE_TYPE:
return kInternal & kTaggedPointer;
default:
@@ -283,29 +308,33 @@ TypeImpl<Config>::BitsetType::Lub(double value) {
}
-// Minimum values of regular numeric bitsets when SmiValuesAre31Bits.
+// Minimum values of regular numeric bitsets.
template <class Config>
-const typename TypeImpl<Config>::BitsetType::BitsetMin
- TypeImpl<Config>::BitsetType::BitsetMins31[] = {
- {kOtherNumber, -V8_INFINITY},
- {kOtherSigned32, kMinInt},
- {kNegativeSignedSmall, -0x40000000},
- {kUnsignedSmall, 0},
- {kOtherUnsigned31, 0x40000000},
- {kOtherUnsigned32, 0x80000000},
- {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}};
-
-
-// Minimum values of regular numeric bitsets when SmiValuesAre32Bits.
-// OtherSigned32 and OtherUnsigned31 are empty (see the diagrams in types.h).
+const typename TypeImpl<Config>::BitsetType::Boundary
+ TypeImpl<Config>::BitsetType::BoundariesArray[] = {
+ {kPlainNumber, -V8_INFINITY},
+ {kNegative32, kMinInt},
+ {kNegative31, -0x40000000},
+ {kUnsigned30, 0},
+ {kUnsigned31, 0x40000000},
+ {kUnsigned32, 0x80000000},
+ {kPlainNumber, static_cast<double>(kMaxUInt32) + 1}
+};
+
+
template <class Config>
-const typename TypeImpl<Config>::BitsetType::BitsetMin
- TypeImpl<Config>::BitsetType::BitsetMins32[] = {
- {kOtherNumber, -V8_INFINITY},
- {kNegativeSignedSmall, kMinInt},
- {kUnsignedSmall, 0},
- {kOtherUnsigned32, 0x80000000},
- {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}};
+const typename TypeImpl<Config>::BitsetType::Boundary*
+TypeImpl<Config>::BitsetType::Boundaries() {
+ return BoundariesArray;
+}
+
+
+template <class Config>
+size_t TypeImpl<Config>::BitsetType::BoundariesSize() {
+ // Windows doesn't like arraysize here.
+ // return arraysize(BoundariesArray);
+ return 7;
+}
template<class Config>
@@ -313,56 +342,97 @@ typename TypeImpl<Config>::bitset
TypeImpl<Config>::BitsetType::Lub(double min, double max) {
DisallowHeapAllocation no_allocation;
int lub = kNone;
- const BitsetMin* mins = BitsetMins();
+ const Boundary* mins = Boundaries();
// Make sure the min-max range touches 0, so we are guaranteed no holes
// in unions of valid bitsets.
if (max < -1) max = -1;
if (min > 0) min = 0;
- for (size_t i = 1; i < BitsetMinsSize(); ++i) {
+ for (size_t i = 1; i < BoundariesSize(); ++i) {
if (min < mins[i].min) {
lub |= mins[i-1].bits;
if (max < mins[i].min) return lub;
}
}
- return lub |= mins[BitsetMinsSize()-1].bits;
+ return lub |= mins[BoundariesSize() - 1].bits;
}
-template<class Config>
+template <class Config>
+typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::NumberBits(
+ bitset bits) {
+ return SEMANTIC(bits & kPlainNumber);
+}
+
+
+template <class Config>
+void TypeImpl<Config>::BitsetType::CheckNumberBits(bitset bits) {
+ // Check that the bitset does not contain any holes in number ranges.
+ bitset number_bits = NumberBits(bits);
+ if (number_bits != 0) {
+ bitset lub = SEMANTIC(Lub(Min(number_bits), Max(number_bits)));
+ CHECK(lub == number_bits);
+ }
+}
+
+template <class Config>
+typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::Glb(
+ double min, double max) {
+ DisallowHeapAllocation no_allocation;
+ int glb = kNone;
+ const Boundary* mins = Boundaries();
+
+ // If the range does not touch 0, the bound is empty.
+ if (max < -1 || min > 0) return glb;
+
+ for (size_t i = 1; i + 1 < BoundariesSize(); ++i) {
+ if (min <= mins[i].min) {
+ if (max + 1 < mins[i + 1].min) break;
+ glb |= mins[i].bits;
+ }
+ }
+ // OtherNumber also contains float numbers, so it can never be
+ // in the greatest lower bound. (There is also the small trouble
+ // of kOtherNumber having a range hole, which we can conveniently
+ // ignore here.)
+ return glb & ~(SEMANTIC(kOtherNumber));
+}
+
+
+template <class Config>
double TypeImpl<Config>::BitsetType::Min(bitset bits) {
DisallowHeapAllocation no_allocation;
- DCHECK(Is(bits, kNumber));
- const BitsetMin* mins = BitsetMins();
+ DCHECK(Is(SEMANTIC(bits), kNumber));
+ const Boundary* mins = Boundaries();
bool mz = SEMANTIC(bits & kMinusZero);
- for (size_t i = 0; i < BitsetMinsSize(); ++i) {
+ for (size_t i = 0; i < BoundariesSize(); ++i) {
if (Is(SEMANTIC(mins[i].bits), bits)) {
return mz ? std::min(0.0, mins[i].min) : mins[i].min;
}
}
if (mz) return 0;
- return base::OS::nan_value();
+ return std::numeric_limits<double>::quiet_NaN();
}
template<class Config>
double TypeImpl<Config>::BitsetType::Max(bitset bits) {
DisallowHeapAllocation no_allocation;
- DCHECK(Is(bits, kNumber));
- const BitsetMin* mins = BitsetMins();
+ DCHECK(Is(SEMANTIC(bits), kNumber));
+ const Boundary* mins = Boundaries();
bool mz = SEMANTIC(bits & kMinusZero);
- if (BitsetType::Is(mins[BitsetMinsSize()-1].bits, bits)) {
+ if (BitsetType::Is(SEMANTIC(mins[BoundariesSize() - 1].bits), bits)) {
return +V8_INFINITY;
}
- for (size_t i = BitsetMinsSize()-1; i-- > 0; ) {
+ for (size_t i = BoundariesSize() - 1; i-- > 0;) {
if (Is(SEMANTIC(mins[i].bits), bits)) {
return mz ?
std::max(0.0, mins[i+1].min - 1) : mins[i+1].min - 1;
}
}
if (mz) return 0;
- return base::OS::nan_value();
+ return std::numeric_limits<double>::quiet_NaN();
}
@@ -408,22 +478,55 @@ bool TypeImpl<Config>::SimplyEquals(TypeImpl* that) {
}
+template <class Config>
+typename TypeImpl<Config>::bitset TypeImpl<Config>::Representation() {
+ return REPRESENTATION(this->BitsetLub());
+}
+
+
// Check if [this] <= [that].
template<class Config>
bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
DisallowHeapAllocation no_allocation;
+ // Fast bitset cases
if (that->IsBitset()) {
return BitsetType::Is(this->BitsetLub(), that->AsBitset());
}
+
if (this->IsBitset()) {
return BitsetType::Is(this->AsBitset(), that->BitsetGlb());
}
+ // Check the representations.
+ if (!BitsetType::Is(Representation(), that->Representation())) {
+ return false;
+ }
+
+ // Check the semantic part.
+ return SemanticIs(that);
+}
+
+
+// Check if SEMANTIC([this]) <= SEMANTIC([that]). The result of the method
+// should be independent of the representation axis of the types.
+template <class Config>
+bool TypeImpl<Config>::SemanticIs(TypeImpl* that) {
+ DisallowHeapAllocation no_allocation;
+
+ if (this == that) return true;
+
+ if (that->IsBitset()) {
+ return BitsetType::Is(SEMANTIC(this->BitsetLub()), that->AsBitset());
+ }
+ if (this->IsBitset()) {
+ return BitsetType::Is(SEMANTIC(this->AsBitset()), that->BitsetGlb());
+ }
+
// (T1 \/ ... \/ Tn) <= T if (T1 <= T) /\ ... /\ (Tn <= T)
if (this->IsUnion()) {
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- if (!this->AsUnion()->Get(i)->Is(that)) return false;
+ if (!this->AsUnion()->Get(i)->SemanticIs(that)) return false;
}
return true;
}
@@ -431,16 +534,16 @@ bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
// T <= (T1 \/ ... \/ Tn) if (T <= T1) \/ ... \/ (T <= Tn)
if (that->IsUnion()) {
for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
- if (this->Is(that->AsUnion()->Get(i))) return true;
+ if (this->SemanticIs(that->AsUnion()->Get(i)->unhandle())) return true;
if (i > 1 && this->IsRange()) return false; // Shortcut.
}
return false;
}
if (that->IsRange()) {
- return (this->IsRange() && Contains(that->AsRange(), this->AsRange()))
- || (this->IsConstant() &&
- Contains(that->AsRange(), *this->AsConstant()->Value()));
+ return (this->IsRange() && Contains(that->AsRange(), this->AsRange())) ||
+ (this->IsConstant() &&
+ Contains(that->AsRange(), this->AsConstant()));
}
if (this->IsRange()) return false;
@@ -472,10 +575,7 @@ bool TypeImpl<Config>::NowIs(TypeImpl* that) {
template<class Config>
bool TypeImpl<Config>::NowStable() {
DisallowHeapAllocation no_allocation;
- for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
- if (!it.Current()->is_stable()) return false;
- }
- return true;
+ return !this->IsClass() || this->AsClass()->Map()->is_stable();
}
@@ -484,10 +584,22 @@ template<class Config>
bool TypeImpl<Config>::Maybe(TypeImpl* that) {
DisallowHeapAllocation no_allocation;
+ // Take care of the representation part (and also approximate
+ // the semantic part).
+ if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
+ return false;
+
+ return SemanticMaybe(that);
+}
+
+template <class Config>
+bool TypeImpl<Config>::SemanticMaybe(TypeImpl* that) {
+ DisallowHeapAllocation no_allocation;
+
// (T1 \/ ... \/ Tn) overlaps T if (T1 overlaps T) \/ ... \/ (Tn overlaps T)
if (this->IsUnion()) {
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- if (this->AsUnion()->Get(i)->Maybe(that)) return true;
+ if (this->AsUnion()->Get(i)->SemanticMaybe(that)) return true;
}
return false;
}
@@ -495,30 +607,41 @@ bool TypeImpl<Config>::Maybe(TypeImpl* that) {
// T overlaps (T1 \/ ... \/ Tn) if (T overlaps T1) \/ ... \/ (T overlaps Tn)
if (that->IsUnion()) {
for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
- if (this->Maybe(that->AsUnion()->Get(i))) return true;
+ if (this->SemanticMaybe(that->AsUnion()->Get(i)->unhandle())) return true;
}
return false;
}
- if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
+ if (!BitsetType::SemanticIsInhabited(this->BitsetLub() & that->BitsetLub()))
return false;
- if (this->IsBitset() || that->IsBitset()) return true;
+
+ if (this->IsBitset() && that->IsBitset()) return true;
if (this->IsClass() != that->IsClass()) return true;
if (this->IsRange()) {
if (that->IsConstant()) {
- return Contains(this->AsRange(), *that->AsConstant()->Value());
+ return Contains(this->AsRange(), that->AsConstant());
+ }
+ if (that->IsRange()) {
+ return Overlap(this->AsRange(), that->AsRange());
+ }
+ if (that->IsBitset()) {
+ bitset number_bits = BitsetType::NumberBits(that->AsBitset());
+ if (number_bits == BitsetType::kNone) {
+ return false;
+ }
+ double min = std::max(BitsetType::Min(number_bits), this->Min());
+ double max = std::min(BitsetType::Max(number_bits), this->Max());
+ return min <= max;
}
- return that->IsRange() && Overlap(this->AsRange(), that->AsRange());
}
if (that->IsRange()) {
- if (this->IsConstant()) {
- return Contains(that->AsRange(), *this->AsConstant()->Value());
- }
- return this->IsRange() && Overlap(this->AsRange(), that->AsRange());
+ return that->SemanticMaybe(this); // This case is handled above.
}
+ if (this->IsBitset() || that->IsBitset()) return true;
+
return this->SimplyEquals(that);
}
@@ -554,20 +677,27 @@ bool TypeImpl<Config>::UnionType::Wellformed() {
DisallowHeapAllocation no_allocation;
// This checks the invariants of the union representation:
// 1. There are at least two elements.
- // 2. At most one element is a bitset, and it must be the first one.
- // 3. At most one element is a range, and it must be the second one
- // (even when the first element is not a bitset).
+ // 2. The first element is a bitset, no other element is a bitset.
+ // 3. At most one element is a range, and it must be the second one.
// 4. No element is itself a union.
- // 5. No element is a subtype of any other.
+ // 5. No element (except the bitset) is a subtype of any other.
+ // 6. If there is a range, then the bitset type does not contain
+ // plain number bits.
DCHECK(this->Length() >= 2); // (1)
+ DCHECK(this->Get(0)->IsBitset()); // (2a)
+
for (int i = 0; i < this->Length(); ++i) {
- if (i != 0) DCHECK(!this->Get(i)->IsBitset()); // (2)
- if (i != 1) DCHECK(!this->Get(i)->IsRange()); // (3)
- DCHECK(!this->Get(i)->IsUnion()); // (4)
+ if (i != 0) DCHECK(!this->Get(i)->IsBitset()); // (2b)
+ if (i != 1) DCHECK(!this->Get(i)->IsRange()); // (3)
+ DCHECK(!this->Get(i)->IsUnion()); // (4)
for (int j = 0; j < this->Length(); ++j) {
- if (i != j) DCHECK(!this->Get(i)->Is(this->Get(j))); // (5)
+ if (i != j && i != 0)
+ DCHECK(!this->Get(i)->SemanticIs(this->Get(j)->unhandle())); // (5)
}
}
+ DCHECK(!this->Get(1)->IsRange() ||
+ (BitsetType::NumberBits(this->Get(0)->AsBitset()) ==
+ BitsetType::kNone)); // (6)
return true;
}
@@ -586,12 +716,10 @@ static bool AddIsSafe(int x, int y) {
template<class Config>
typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
TypeHandle type1, TypeHandle type2, Region* region) {
- bitset bits = type1->BitsetGlb() & type2->BitsetGlb();
- if (!BitsetType::IsInhabited(bits)) bits = BitsetType::kNone;
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
- return BitsetType::New(bits, region);
+ return BitsetType::New(type1->AsBitset() & type2->AsBitset(), region);
}
// Fast case: top or bottom types.
@@ -603,6 +731,26 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
if (type2->Is(type1)) return type2;
// Slow case: create union.
+
+ // Figure out the representation of the result first.
+ // The rest of the method should not change this representation and
+ // it should not make any decisions based on representations (i.e.,
+ // it should only use the semantic part of types).
+ const bitset representation =
+ type1->Representation() & type2->Representation();
+
+ // Semantic subtyping check - this is needed for consistency with the
+ // semi-fast case above - we should behave the same way regardless of
+ // representations. Intersection with a universal bitset should only update
+ // the representations.
+ if (type1->SemanticIs(type2->unhandle())) {
+ type2 = Any(region);
+ } else if (type2->SemanticIs(type1->unhandle())) {
+ type1 = Any(region);
+ }
+
+ bitset bits =
+ SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
if (!AddIsSafe(size1, size2)) return Any(region);
@@ -615,19 +763,20 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
// Deal with bitsets.
result->Set(size++, BitsetType::New(bits, region));
- // Deal with ranges.
- TypeHandle range = None(region);
- RangeType* range1 = type1->GetRange();
- RangeType* range2 = type2->GetRange();
- if (range1 != NULL && range2 != NULL) {
- Limits lim = Intersect(Limits(range1), Limits(range2));
- if (lim.min->Number() <= lim.max->Number()) {
- range = RangeType::New(lim, region);
- }
- }
- result->Set(size++, range);
+ Limits lims = Limits::Empty(region);
+ size = IntersectAux(type1, type2, result, size, &lims, region);
+
+ // If the range is not empty, then insert it into the union and
+ // remove the number bits from the bitset.
+ if (!IsEmpty(lims)) {
+ size = UpdateRange(RangeType::New(lims, representation, region), result,
+ size, region);
- size = IntersectAux(type1, type2, result, size, region);
+ // Remove the number bits.
+ bitset number_bits = BitsetType::NumberBits(bits);
+ bits &= ~number_bits;
+ result->Set(0, BitsetType::New(bits, region));
+ }
return NormalizeUnion(result, size);
}
@@ -635,18 +784,17 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
template<class Config>
int TypeImpl<Config>::UpdateRange(
RangeHandle range, UnionHandle result, int size, Region* region) {
- TypeHandle old_range = result->Get(1);
- DCHECK(old_range->IsRange() || old_range->IsNone());
- if (range->Is(old_range)) return size;
- if (!old_range->Is(range->unhandle())) {
- range = RangeType::New(
- Union(Limits(range->AsRange()), Limits(old_range->AsRange())), region);
+ if (size == 1) {
+ result->Set(size++, range);
+ } else {
+ // Make space for the range.
+ result->Set(size++, result->Get(1));
+ result->Set(1, range);
}
- result->Set(1, range);
// Remove any components that just got subsumed.
for (int i = 2; i < size; ) {
- if (result->Get(i)->Is(range->unhandle())) {
+ if (result->Get(i)->SemanticIs(range->unhandle())) {
result->Set(i, result->Get(--size));
} else {
++i;
@@ -656,50 +804,78 @@ int TypeImpl<Config>::UpdateRange(
}
-template<class Config>
-int TypeImpl<Config>::IntersectAux(
- TypeHandle lhs, TypeHandle rhs,
- UnionHandle result, int size, Region* region) {
+template <class Config>
+typename TypeImpl<Config>::Limits TypeImpl<Config>::ToLimits(bitset bits,
+ Region* region) {
+ bitset number_bits = BitsetType::NumberBits(bits);
+
+ if (number_bits == BitsetType::kNone) {
+ return Limits::Empty(region);
+ }
+
+ return Limits(BitsetType::Min(number_bits), BitsetType::Max(number_bits));
+}
+
+
+template <class Config>
+typename TypeImpl<Config>::Limits TypeImpl<Config>::IntersectRangeAndBitset(
+ TypeHandle range, TypeHandle bitset, Region* region) {
+ Limits range_lims(range->AsRange());
+ Limits bitset_lims = ToLimits(bitset->AsBitset(), region);
+ return Intersect(range_lims, bitset_lims);
+}
+
+
+template <class Config>
+int TypeImpl<Config>::IntersectAux(TypeHandle lhs, TypeHandle rhs,
+ UnionHandle result, int size, Limits* lims,
+ Region* region) {
if (lhs->IsUnion()) {
for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
- size = IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, region);
+ size =
+ IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, lims, region);
}
return size;
}
if (rhs->IsUnion()) {
for (int i = 0, n = rhs->AsUnion()->Length(); i < n; ++i) {
- size = IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, region);
+ size =
+ IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, lims, region);
}
return size;
}
- if (!BitsetType::IsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
+ if (!BitsetType::SemanticIsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
return size;
}
if (lhs->IsRange()) {
- if (rhs->IsBitset() || rhs->IsClass()) {
- return UpdateRange(
- Config::template cast<RangeType>(lhs), result, size, region);
+ if (rhs->IsBitset()) {
+ Limits lim = IntersectRangeAndBitset(lhs, rhs, region);
+
+ if (!IsEmpty(lim)) {
+ *lims = Union(lim, *lims);
+ }
+ return size;
+ }
+ if (rhs->IsClass()) {
+ *lims = Union(Limits(lhs->AsRange()), *lims);
}
- if (rhs->IsConstant() &&
- Contains(lhs->AsRange(), *rhs->AsConstant()->Value())) {
+ if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
return AddToUnion(rhs, result, size, region);
}
+ if (rhs->IsRange()) {
+ Limits lim = Intersect(Limits(lhs->AsRange()), Limits(rhs->AsRange()));
+ if (!IsEmpty(lim)) {
+ *lims = Union(lim, *lims);
+ }
+ }
return size;
}
if (rhs->IsRange()) {
- if (lhs->IsBitset() || lhs->IsClass()) {
- return UpdateRange(
- Config::template cast<RangeType>(rhs), result, size, region);
- }
- if (lhs->IsConstant() &&
- Contains(rhs->AsRange(), *lhs->AsConstant()->Value())) {
- return AddToUnion(lhs, result, size, region);
- }
- return size;
+ // This case is handled symmetrically above.
+ return IntersectAux(rhs, lhs, result, size, lims, region);
}
-
if (lhs->IsBitset() || rhs->IsBitset()) {
return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, region);
}
@@ -713,10 +889,56 @@ int TypeImpl<Config>::IntersectAux(
}
+// Make sure that we produce a well-formed range and bitset:
+// If the range is non-empty, the number bits in the bitset should be
+// clear. Moreover, if we have a canonical range (such as Signed32(),
+// we want to produce a bitset rather than a range.
+template <class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
+ RangeHandle range, bitset* bits, Region* region) {
+ // Fast path: If the bitset does not mention numbers, we can just keep the
+ // range.
+ bitset number_bits = BitsetType::NumberBits(*bits);
+ if (number_bits == 0) {
+ return range;
+ }
+
+ // If the range is contained within the bitset, return an empty range
+ // (but make sure we take the representation).
+ bitset range_lub = SEMANTIC(range->BitsetLub());
+ if (BitsetType::Is(BitsetType::NumberBits(range_lub), *bits)) {
+ return None(region);
+ }
+
+ // Slow path: reconcile the bitset range and the range.
+ double bitset_min = BitsetType::Min(number_bits);
+ double bitset_max = BitsetType::Max(number_bits);
+
+ double range_min = range->Min();
+ double range_max = range->Max();
+
+ // Remove the number bits from the bitset, they would just confuse us now.
+ *bits &= ~number_bits;
+
+ if (range_min <= bitset_min && range_max >= bitset_max) {
+ // Bitset is contained within the range, just return the range.
+ return range;
+ }
+
+ if (bitset_min < range_min) {
+ range_min = bitset_min;
+ }
+ if (bitset_max > range_max) {
+ range_max = bitset_max;
+ }
+ return RangeType::New(range_min, range_max,
+ BitsetType::New(BitsetType::kNone, region), region);
+}
+
+
template<class Config>
typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
TypeHandle type1, TypeHandle type2, Region* region) {
-
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region);
@@ -730,6 +952,13 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
if (type1->Is(type2)) return type2;
if (type2->Is(type1)) return type1;
+ // Figure out the representation of the result.
+ // The rest of the method should not change this representation and
+ // it should make any decisions based on representations (i.e.,
+ // it should only use the semantic part of types).
+ const bitset representation =
+ type1->Representation() | type2->Representation();
+
// Slow case: create union.
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
@@ -740,23 +969,26 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
UnionHandle result = UnionType::New(size, region);
size = 0;
- // Deal with bitsets.
- TypeHandle bits = BitsetType::New(
- type1->BitsetGlb() | type2->BitsetGlb(), region);
- result->Set(size++, bits);
+ // Compute the new bitset.
+ bitset new_bitset = SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
// Deal with ranges.
TypeHandle range = None(region);
RangeType* range1 = type1->GetRange();
RangeType* range2 = type2->GetRange();
if (range1 != NULL && range2 != NULL) {
- range = RangeType::New(Union(Limits(range1), Limits(range2)), region);
+ Limits lims = Union(Limits(range1), Limits(range2));
+ RangeHandle union_range = RangeType::New(lims, representation, region);
+ range = NormalizeRangeAndBitset(union_range, &new_bitset, region);
} else if (range1 != NULL) {
- range = handle(range1);
+ range = NormalizeRangeAndBitset(handle(range1), &new_bitset, region);
} else if (range2 != NULL) {
- range = handle(range2);
+ range = NormalizeRangeAndBitset(handle(range2), &new_bitset, region);
}
- result->Set(size++, range);
+ new_bitset = SEMANTIC(new_bitset) | representation;
+ TypeHandle bits = BitsetType::New(new_bitset, region);
+ result->Set(size++, bits);
+ if (!range->IsNone()) result->Set(size++, range);
size = AddToUnion(type1, result, size, region);
size = AddToUnion(type2, result, size, region);
@@ -777,7 +1009,7 @@ int TypeImpl<Config>::AddToUnion(
return size;
}
for (int i = 0; i < size; ++i) {
- if (type->Is(result->Get(i))) return size;
+ if (type->SemanticIs(result->Get(i)->unhandle())) return size;
}
result->Set(size++, type);
return size;
@@ -787,16 +1019,22 @@ int TypeImpl<Config>::AddToUnion(
template<class Config>
typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
UnionHandle unioned, int size) {
- DCHECK(size >= 2);
- // If range is subsumed by bitset, use its place for a different type.
- if (unioned->Get(1)->Is(unioned->Get(0))) {
- unioned->Set(1, unioned->Get(--size));
+ DCHECK(size >= 1);
+ DCHECK(unioned->Get(0)->IsBitset());
+ // If the union has just one element, return it.
+ if (size == 1) {
+ return unioned->Get(0);
}
- // If bitset is None, use its place for a different type.
- if (size >= 2 && unioned->Get(0)->IsNone()) {
- unioned->Set(0, unioned->Get(--size));
+ bitset bits = unioned->Get(0)->AsBitset();
+ // If the union only consists of a range, we can get rid of the union.
+ if (size == 2 && SEMANTIC(bits) == BitsetType::kNone) {
+ bitset representation = REPRESENTATION(bits);
+ if (representation == unioned->Get(1)->Representation()) {
+ return unioned->Get(1);
+ }
+ // TODO(jarin) If the element at 1 is range of constant, slap
+ // the representation on it and return that.
}
- if (size == 1) return unioned->Get(0);
unioned->Shrink(size);
SLOW_DCHECK(unioned->Wellformed());
return unioned;
@@ -804,6 +1042,25 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
// -----------------------------------------------------------------------------
+// Component extraction
+
+// static
+template <class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Representation(
+ TypeHandle t, Region* region) {
+ return BitsetType::New(t->Representation(), region);
+}
+
+
+// static
+template <class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Semantic(
+ TypeHandle t, Region* region) {
+ return Intersect(t, BitsetType::New(BitsetType::kSemantic, region), region);
+}
+
+
+// -----------------------------------------------------------------------------
// Iteration.
template<class Config>
@@ -918,7 +1175,8 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
return ConstantType::New(type->AsConstant()->Value(), region);
} else if (type->IsRange()) {
return RangeType::New(
- type->AsRange()->Min(), type->AsRange()->Max(), region);
+ type->AsRange()->Min(), type->AsRange()->Max(),
+ BitsetType::New(REPRESENTATION(type->BitsetLub()), region), region);
} else if (type->IsContext()) {
TypeHandle outer = Convert<OtherType>(type->AsContext()->Outer(), region);
return ContextType::New(outer, region);
@@ -984,16 +1242,18 @@ void TypeImpl<Config>::BitsetType::Print(std::ostream& os, // NOLINT
return;
}
+ // clang-format off
static const bitset named_bitsets[] = {
#define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
- REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
#undef BITSET_CONSTANT
#define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
- INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
- SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
#undef BITSET_CONSTANT
};
+ // clang-format on
bool is_first = true;
os << "(";
@@ -1026,8 +1286,8 @@ void TypeImpl<Config>::PrintTo(std::ostream& os, PrintDimension dim) {
} else if (this->IsRange()) {
std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
std::streamsize saved_precision = os.precision(0);
- os << "Range(" << this->AsRange()->Min()->Number() << ", "
- << this->AsRange()->Max()->Number() << ")";
+ os << "Range(" << this->AsRange()->Min() << ", " << this->AsRange()->Max()
+ << ")";
os.flags(saved_flags);
os.precision(saved_precision);
} else if (this->IsContext()) {
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 4b7d8ba979..0aae064171 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -153,6 +153,8 @@ namespace internal {
// -----------------------------------------------------------------------------
// Values for bitset types
+// clang-format off
+
#define MASK_BITSET_TYPE_LIST(V) \
V(Representation, 0xfff00000u) \
V(Semantic, 0x000ffffeu)
@@ -195,11 +197,11 @@ namespace internal {
V(OtherNumber, 1u << 4 | REPRESENTATION(kTagged | kUntaggedNumber))
#define SEMANTIC_BITSET_TYPE_LIST(V) \
- V(NegativeSignedSmall, 1u << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Negative31, 1u << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
V(Null, 1u << 6 | REPRESENTATION(kTaggedPointer)) \
V(Undefined, 1u << 7 | REPRESENTATION(kTaggedPointer)) \
V(Boolean, 1u << 8 | REPRESENTATION(kTaggedPointer)) \
- V(UnsignedSmall, 1u << 9 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Unsigned30, 1u << 9 | REPRESENTATION(kTagged | kUntaggedNumber)) \
V(MinusZero, 1u << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
V(NaN, 1u << 11 | REPRESENTATION(kTagged | kUntaggedNumber)) \
V(Symbol, 1u << 12 | REPRESENTATION(kTaggedPointer)) \
@@ -211,11 +213,11 @@ namespace internal {
V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
V(Internal, 1u << 19 | REPRESENTATION(kTagged | kUntagged)) \
\
- V(SignedSmall, kUnsignedSmall | kNegativeSignedSmall) \
- V(Signed32, kSignedSmall | kOtherUnsigned31 | kOtherSigned32) \
- V(NegativeSigned32, kNegativeSignedSmall | kOtherSigned32) \
- V(NonNegativeSigned32, kUnsignedSmall | kOtherUnsigned31) \
- V(Unsigned32, kUnsignedSmall | kOtherUnsigned31 | kOtherUnsigned32) \
+ V(Signed31, kUnsigned30 | kNegative31) \
+ V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
+ V(Negative32, kNegative31 | kOtherSigned32) \
+ V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
+ V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | kOtherUnsigned32) \
V(Integral32, kSigned32 | kUnsigned32) \
V(PlainNumber, kIntegral32 | kOtherNumber) \
V(OrderedNumber, kPlainNumber | kMinusZero) \
@@ -237,29 +239,17 @@ namespace internal {
V(NonNumber, kUnique | kString | kInternal) \
V(Any, 0xfffffffeu)
+// clang-format on
/*
* The following diagrams show how integers (in the mathematical sense) are
* divided among the different atomic numerical types.
*
- * If SmiValuesAre31Bits():
- *
- * ON OS32 OSS US OU31 OU32 ON
+ * ON OS32 N31 U30 OU31 OU32 ON
* ______[_______[_______[_______[_______[_______[_______
* -2^31 -2^30 0 2^30 2^31 2^32
*
- * Otherwise:
- *
- * ON OSS US OU32 ON
- * ______[_______________[_______________[_______[_______
- * -2^31 0 2^31 2^32
- *
- *
* E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
- *
- * NOTE: OtherSigned32 (OS32) and OU31 (OtherUnsigned31) are empty if Smis are
- * 32-bit wide. They should thus never be used directly, only indirectly
- * via e.g. Number.
*/
#define PROPER_BITSET_TYPE_LIST(V) \
@@ -280,21 +270,29 @@ namespace internal {
// typedef TypeImpl<Config> Type;
// typedef Base;
// typedef Struct;
+// typedef Range;
// typedef Region;
// template<class> struct Handle { typedef type; } // No template typedefs...
// template<class T> static Handle<T>::type null_handle();
// template<class T> static Handle<T>::type handle(T* t); // !is_bitset(t)
// template<class T> static Handle<T>::type cast(Handle<Type>::type);
+//
// static bool is_bitset(Type*);
// static bool is_class(Type*);
// static bool is_struct(Type*, int tag);
+// static bool is_range(Type*);
+//
// static bitset as_bitset(Type*);
// static i::Handle<i::Map> as_class(Type*);
// static Handle<Struct>::type as_struct(Type*);
+// static Handle<Range>::type as_range(Type*);
+//
// static Type* from_bitset(bitset);
// static Handle<Type>::type from_bitset(bitset, Region*);
// static Handle<Type>::type from_class(i::Handle<Map>, Region*);
// static Handle<Type>::type from_struct(Handle<Struct>::type, int tag);
+// static Handle<Type>::type from_range(Handle<Range>::type);
+//
// static Handle<Struct>::type struct_create(int tag, int length, Region*);
// static void struct_shrink(Handle<Struct>::type, int length);
// static int struct_tag(Handle<Struct>::type);
@@ -305,6 +303,12 @@ namespace internal {
// static i::Handle<V> struct_get_value(Handle<Struct>::type, int);
// template<class V>
// static void struct_set_value(Handle<Struct>::type, int, i::Handle<V>);
+//
+// static Handle<Range>::type range_create(Region*);
+// static int range_get_bitset(Handle<Range>::type);
+// static void range_set_bitset(Handle<Range>::type, int);
+// static double range_get_double(Handle<Range>::type, int);
+// static void range_set_double(Handle<Range>::type, int, double, Region*);
// }
template<class Config>
class TypeImpl : public Config::Base {
@@ -345,15 +349,31 @@ class TypeImpl : public Config::Base {
PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
#undef DEFINE_TYPE_CONSTRUCTOR
+ static TypeImpl* SignedSmall() {
+ return BitsetType::New(BitsetType::SignedSmall());
+ }
+ static TypeHandle SignedSmall(Region* region) {
+ return BitsetType::New(BitsetType::SignedSmall(), region);
+ }
+ static TypeImpl* UnsignedSmall() {
+ return BitsetType::New(BitsetType::UnsignedSmall());
+ }
+ static TypeHandle UnsignedSmall(Region* region) {
+ return BitsetType::New(BitsetType::UnsignedSmall(), region);
+ }
+
static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
return ClassType::New(map, region);
}
static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
return ConstantType::New(value, region);
}
- static TypeHandle Range(
- i::Handle<i::Object> min, i::Handle<i::Object> max, Region* region) {
- return RangeType::New(min, max, region);
+ static TypeHandle Range(double min, double max, Region* region) {
+ return RangeType::New(
+ min, max, BitsetType::New(REPRESENTATION(BitsetType::kTagged |
+ BitsetType::kUntaggedNumber),
+ region),
+ region);
}
static TypeHandle Context(TypeHandle outer, Region* region) {
return ContextType::New(outer, region);
@@ -410,8 +430,11 @@ class TypeImpl : public Config::Base {
return Of(*value, region);
}
- // Predicates.
+ // Extraction of components.
+ static TypeHandle Representation(TypeHandle t, Region* region);
+ static TypeHandle Semantic(TypeHandle t, Region* region);
+ // Predicates.
bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
bool Is(TypeImpl* that) { return this == that || this->SlowIs(that); }
@@ -446,6 +469,7 @@ class TypeImpl : public Config::Base {
// Inspection.
+ bool IsRange() { return Config::is_range(this); }
bool IsClass() {
return Config::is_class(this)
|| Config::is_struct(this, StructuralType::kClassTag);
@@ -453,9 +477,6 @@ class TypeImpl : public Config::Base {
bool IsConstant() {
return Config::is_struct(this, StructuralType::kConstantTag);
}
- bool IsRange() {
- return Config::is_struct(this, StructuralType::kRangeTag);
- }
bool IsContext() {
return Config::is_struct(this, StructuralType::kContextTag);
}
@@ -516,6 +537,8 @@ class TypeImpl : public Config::Base {
void Print();
#endif
+ bool IsUnionForTesting() { return IsUnion(); }
+
protected:
// Friends.
@@ -543,12 +566,16 @@ class TypeImpl : public Config::Base {
}
UnionType* AsUnion() { return UnionType::cast(this); }
+ bitset Representation();
+
// Auxiliary functions.
+ bool SemanticMaybe(TypeImpl* that);
bitset BitsetGlb() { return BitsetType::Glb(this); }
bitset BitsetLub() { return BitsetType::Lub(this); }
bool SlowIs(TypeImpl* that);
+ bool SemanticIs(TypeImpl* that);
static bool IsInteger(double x) {
return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
@@ -558,33 +585,39 @@ class TypeImpl : public Config::Base {
}
struct Limits {
- i::Handle<i::Object> min;
- i::Handle<i::Object> max;
- Limits(i::Handle<i::Object> min, i::Handle<i::Object> max) :
- min(min), max(max) {}
- explicit Limits(RangeType* range) :
- min(range->Min()), max(range->Max()) {}
+ double min;
+ double max;
+ Limits(double min, double max) : min(min), max(max) {}
+ explicit Limits(RangeType* range) : min(range->Min()), max(range->Max()) {}
+ static Limits Empty(Region* region) { return Limits(1, 0); }
};
+ static bool IsEmpty(Limits lim);
static Limits Intersect(Limits lhs, Limits rhs);
static Limits Union(Limits lhs, Limits rhs);
static bool Overlap(RangeType* lhs, RangeType* rhs);
static bool Contains(RangeType* lhs, RangeType* rhs);
+ static bool Contains(RangeType* range, ConstantType* constant);
static bool Contains(RangeType* range, i::Object* val);
static int UpdateRange(
RangeHandle type, UnionHandle result, int size, Region* region);
+ static Limits IntersectRangeAndBitset(TypeHandle range, TypeHandle bits,
+ Region* region);
+ static Limits ToLimits(bitset bits, Region* region);
+
bool SimplyEquals(TypeImpl* that);
template<class TypeHandle>
bool SimplyEquals(TypeHandle that) { return this->SimplyEquals(*that); }
static int AddToUnion(
TypeHandle type, UnionHandle result, int size, Region* region);
- static int IntersectAux(
- TypeHandle type, TypeHandle other,
- UnionHandle result, int size, Region* region);
+ static int IntersectAux(TypeHandle type, TypeHandle other, UnionHandle result,
+ int size, Limits* limits, Region* region);
static TypeHandle NormalizeUnion(UnionHandle unioned, int size);
+ static TypeHandle NormalizeRangeAndBitset(RangeHandle range, bitset* bits,
+ Region* region);
};
@@ -603,35 +636,26 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
kUnusedEOL = 0
};
+ static bitset SignedSmall();
+ static bitset UnsignedSmall();
+
bitset Bitset() { return Config::as_bitset(this); }
static TypeImpl* New(bitset bits) {
- DCHECK(bits == kNone || IsInhabited(bits));
-
- if (FLAG_enable_slow_asserts) {
- // Check that the bitset does not contain any holes in number ranges.
- bitset mask = kSemantic;
- if (!i::SmiValuesAre31Bits()) {
- mask &= ~(kOtherUnsigned31 | kOtherSigned32);
- }
- bitset number_bits = bits & kPlainNumber & mask;
- if (number_bits != 0) {
- bitset lub = Lub(Min(number_bits), Max(number_bits)) & mask;
- CHECK(lub == number_bits);
- }
- }
-
+ if (FLAG_enable_slow_asserts) CheckNumberBits(bits);
return Config::from_bitset(bits);
}
static TypeHandle New(bitset bits, Region* region) {
- DCHECK(bits == kNone || IsInhabited(bits));
+ if (FLAG_enable_slow_asserts) CheckNumberBits(bits);
return Config::from_bitset(bits, region);
}
- // TODO(neis): Eventually allow again for types with empty semantics
- // part and modify intersection and possibly subtyping accordingly.
static bool IsInhabited(bitset bits) {
- return bits & kSemantic;
+ return SEMANTIC(bits) != kNone && REPRESENTATION(bits) != kNone;
+ }
+
+ static bool SemanticIsInhabited(bitset bits) {
+ return SEMANTIC(bits) != kNone;
}
static bool Is(bitset bits1, bitset bits2) {
@@ -642,6 +666,7 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
static double Max(bitset);
static bitset Glb(TypeImpl* type); // greatest lower bound that's a bitset
+ static bitset Glb(double min, double max);
static bitset Lub(TypeImpl* type); // least upper bound that's a bitset
static bitset Lub(i::Map* map);
static bitset Lub(i::Object* value);
@@ -654,21 +679,18 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
static void Print(bitset);
#endif
+ static bitset NumberBits(bitset bits);
+
private:
- struct BitsetMin{
+ struct Boundary {
bitset bits;
double min;
};
- static const BitsetMin BitsetMins31[];
- static const BitsetMin BitsetMins32[];
- static const BitsetMin* BitsetMins() {
- return i::SmiValuesAre31Bits() ? BitsetMins31 : BitsetMins32;
- }
- static size_t BitsetMinsSize() {
- return i::SmiValuesAre31Bits() ? 7 : 5;
- /* arraysize(BitsetMins31) : arraysize(BitsetMins32); */
- // Using arraysize here doesn't compile on Windows.
- }
+ static const Boundary BoundariesArray[];
+ static inline const Boundary* Boundaries();
+ static inline size_t BoundariesSize();
+
+ static void CheckNumberBits(bitset bits);
};
@@ -686,7 +708,6 @@ class TypeImpl<Config>::StructuralType : public TypeImpl<Config> {
enum Tag {
kClassTag,
kConstantTag,
- kRangeTag,
kContextTag,
kArrayTag,
kFunctionTag,
@@ -812,28 +833,33 @@ class TypeImpl<Config>::ConstantType : public StructuralType {
// -----------------------------------------------------------------------------
// Range types.
-template<class Config>
-class TypeImpl<Config>::RangeType : public StructuralType {
+template <class Config>
+class TypeImpl<Config>::RangeType : public TypeImpl<Config> {
public:
- int BitsetLub() { return this->Get(0)->AsBitset(); }
- i::Handle<i::Object> Min() { return this->template GetValue<i::Object>(1); }
- i::Handle<i::Object> Max() { return this->template GetValue<i::Object>(2); }
-
- static RangeHandle New(
- i::Handle<i::Object> min, i::Handle<i::Object> max, Region* region) {
- DCHECK(IsInteger(min->Number()) && IsInteger(max->Number()));
- DCHECK(min->Number() <= max->Number());
- RangeHandle type = Config::template cast<RangeType>(
- StructuralType::New(StructuralType::kRangeTag, 3, region));
- type->Set(0, BitsetType::New(
- BitsetType::Lub(min->Number(), max->Number()), region));
- type->SetValue(1, min);
- type->SetValue(2, max);
- return type;
+ bitset Bound() { return Config::range_get_bitset(Config::as_range(this)); }
+ double Min() { return Config::range_get_double(Config::as_range(this), 0); }
+ double Max() { return Config::range_get_double(Config::as_range(this), 1); }
+
+ static RangeHandle New(double min, double max, TypeHandle representation,
+ Region* region) {
+ DCHECK(IsInteger(min) && IsInteger(max));
+ DCHECK(min <= max);
+ bitset representation_bits = representation->AsBitset();
+ DCHECK(REPRESENTATION(representation_bits) == representation_bits);
+
+ typename Config::template Handle<typename Config::Range>::type range =
+ Config::range_create(region);
+
+ bitset bits = SEMANTIC(BitsetType::Lub(min, max)) | representation_bits;
+ Config::range_set_bitset(range, bits);
+ Config::range_set_double(range, 0, min, region);
+ Config::range_set_double(range, 1, max, region);
+ return Config::template cast<RangeType>(Config::from_range(range));
}
- static RangeHandle New(Limits lim, Region* region) {
- return New(lim.min, lim.max, region);
+ static RangeHandle New(Limits lim, bitset representation, Region* region) {
+ return New(lim.min, lim.max, BitsetType::New(representation, region),
+ region);
}
static RangeType* cast(TypeImpl* type) {
@@ -842,7 +868,6 @@ class TypeImpl<Config>::RangeType : public StructuralType {
}
};
// TODO(neis): Also cache min and max values.
-// TODO(neis): Allow restricting the representation.
// -----------------------------------------------------------------------------
@@ -952,9 +977,19 @@ struct ZoneTypeConfig {
typedef TypeImpl<ZoneTypeConfig> Type;
class Base {};
typedef void* Struct;
+ // Hack: the Struct and Range types can be aliased in memory, the first
+ // pointer word of each both must be the tag (kRangeStructTag for Range,
+ // anything else for Struct) so that we can differentiate them.
+ struct Range {
+ void* tag;
+ int bitset;
+ double limits[2];
+ };
typedef i::Zone Region;
template<class T> struct Handle { typedef T* type; };
+ static const int kRangeStructTag = 0x1000;
+
template<class T> static inline T* null_handle();
template<class T> static inline T* handle(T* type);
template<class T> static inline T* cast(Type* type);
@@ -962,15 +997,18 @@ struct ZoneTypeConfig {
static inline bool is_bitset(Type* type);
static inline bool is_class(Type* type);
static inline bool is_struct(Type* type, int tag);
+ static inline bool is_range(Type* type);
static inline Type::bitset as_bitset(Type* type);
static inline i::Handle<i::Map> as_class(Type* type);
static inline Struct* as_struct(Type* type);
+ static inline Range* as_range(Type* type);
static inline Type* from_bitset(Type::bitset);
static inline Type* from_bitset(Type::bitset, Zone* zone);
static inline Type* from_class(i::Handle<i::Map> map, Zone* zone);
static inline Type* from_struct(Struct* structured);
+ static inline Type* from_range(Range* range);
static inline Struct* struct_create(int tag, int length, Zone* zone);
static inline void struct_shrink(Struct* structure, int length);
@@ -982,6 +1020,12 @@ struct ZoneTypeConfig {
static inline i::Handle<V> struct_get_value(Struct* structure, int i);
template<class V> static inline void struct_set_value(
Struct* structure, int i, i::Handle<V> x);
+
+ static inline Range* range_create(Zone* zone);
+ static inline int range_get_bitset(Range* range);
+ static inline void range_set_bitset(Range* range, int);
+ static inline double range_get_double(Range*, int index);
+ static inline void range_set_double(Range*, int index, double value, Zone*);
};
typedef TypeImpl<ZoneTypeConfig> Type;
@@ -995,9 +1039,12 @@ struct HeapTypeConfig {
typedef TypeImpl<HeapTypeConfig> Type;
typedef i::Object Base;
typedef i::FixedArray Struct;
+ typedef i::FixedArray Range;
typedef i::Isolate Region;
template<class T> struct Handle { typedef i::Handle<T> type; };
+ static const int kRangeStructTag = 0xffff;
+
template<class T> static inline i::Handle<T> null_handle();
template<class T> static inline i::Handle<T> handle(T* type);
template<class T> static inline i::Handle<T> cast(i::Handle<Type> type);
@@ -1005,16 +1052,19 @@ struct HeapTypeConfig {
static inline bool is_bitset(Type* type);
static inline bool is_class(Type* type);
static inline bool is_struct(Type* type, int tag);
+ static inline bool is_range(Type* type);
static inline Type::bitset as_bitset(Type* type);
static inline i::Handle<i::Map> as_class(Type* type);
static inline i::Handle<Struct> as_struct(Type* type);
+ static inline i::Handle<Range> as_range(Type* type);
static inline Type* from_bitset(Type::bitset);
static inline i::Handle<Type> from_bitset(Type::bitset, Isolate* isolate);
static inline i::Handle<Type> from_class(
i::Handle<i::Map> map, Isolate* isolate);
static inline i::Handle<Type> from_struct(i::Handle<Struct> structure);
+ static inline i::Handle<Type> from_range(i::Handle<Range> range);
static inline i::Handle<Struct> struct_create(
int tag, int length, Isolate* isolate);
@@ -1030,6 +1080,13 @@ struct HeapTypeConfig {
template<class V>
static inline void struct_set_value(
i::Handle<Struct> structure, int i, i::Handle<V> x);
+
+ static inline i::Handle<Range> range_create(Isolate* isolate);
+ static inline int range_get_bitset(i::Handle<Range> range);
+ static inline void range_set_bitset(i::Handle<Range> range, int value);
+ static inline double range_get_double(i::Handle<Range> range, int index);
+ static inline void range_set_double(i::Handle<Range> range, int index,
+ double value, Isolate* isolate);
};
typedef TypeImpl<HeapTypeConfig> HeapType;
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 17deb6db42..48528705bf 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -16,13 +16,12 @@ namespace internal {
AstTyper::AstTyper(CompilationInfo* info)
: info_(info),
- oracle_(
- handle(info->closure()->shared()->code()),
- handle(info->closure()->shared()->feedback_vector()),
- handle(info->closure()->context()->native_context()),
- info->zone()),
+ oracle_(info->isolate(), info->zone(),
+ handle(info->closure()->shared()->code()),
+ handle(info->closure()->shared()->feedback_vector()),
+ handle(info->closure()->context()->native_context())),
store_(info->zone()) {
- InitializeAstVisitor(info->zone());
+ InitializeAstVisitor(info->isolate(), info->zone());
}
@@ -408,7 +407,9 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
!CompileTimeValue::IsCompileTimeValue(prop->value())) ||
prop->kind() == ObjectLiteral::Property::COMPUTED) {
- if (prop->key()->value()->IsInternalizedString() && prop->emit_store()) {
+ if (!prop->is_computed_name() &&
+ prop->key()->AsLiteral()->value()->IsInternalizedString() &&
+ prop->emit_store()) {
prop->RecordTypeFeedback(oracle());
}
}
@@ -531,8 +532,8 @@ void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
bool is_uninitialized = true;
- if (expr->IsUsingCallFeedbackSlot(isolate())) {
- FeedbackVectorICSlot slot = expr->CallFeedbackSlot();
+ if (expr->IsUsingCallFeedbackICSlot(isolate())) {
+ FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
is_uninitialized = oracle()->CallIsUninitialized(slot);
if (!expr->expression()->IsProperty() &&
oracle()->CallIsMonomorphic(slot)) {
@@ -796,10 +797,6 @@ void AstTyper::VisitModuleLiteral(ModuleLiteral* module) {
}
-void AstTyper::VisitModuleVariable(ModuleVariable* module) {
-}
-
-
void AstTyper::VisitModulePath(ModulePath* module) {
RECURSE(Visit(module->module()));
}
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
index b56c878ba8..34649775fb 100644
--- a/deps/v8/src/typing.h
+++ b/deps/v8/src/typing.h
@@ -9,7 +9,6 @@
#include "src/allocation.h"
#include "src/ast.h"
-#include "src/compiler.h"
#include "src/effects.h"
#include "src/scopes.h"
#include "src/type-info.h"
@@ -24,9 +23,7 @@ class AstTyper: public AstVisitor {
public:
static void Run(CompilationInfo* info);
- void* operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
- }
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
void operator delete(void* pointer, Zone* zone) { }
void operator delete(void* pointer) { }
diff --git a/deps/v8/src/unicode-decoder.cc b/deps/v8/src/unicode-decoder.cc
index 88eff3ad26..a3bf829522 100644
--- a/deps/v8/src/unicode-decoder.cc
+++ b/deps/v8/src/unicode-decoder.cc
@@ -10,16 +10,16 @@
namespace unibrow {
-void Utf8DecoderBase::Reset(uint16_t* buffer, unsigned buffer_length,
- const uint8_t* stream, unsigned stream_length) {
+void Utf8DecoderBase::Reset(uint16_t* buffer, size_t buffer_length,
+ const uint8_t* stream, size_t stream_length) {
// Assume everything will fit in the buffer and stream won't be needed.
last_byte_of_buffer_unused_ = false;
unbuffered_start_ = NULL;
bool writing_to_buffer = true;
// Loop until stream is read, writing to buffer as long as buffer has space.
- unsigned utf16_length = 0;
+ size_t utf16_length = 0;
while (stream_length != 0) {
- unsigned cursor = 0;
+ size_t cursor = 0;
uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
DCHECK(cursor > 0 && cursor <= stream_length);
stream += cursor;
@@ -56,9 +56,9 @@ void Utf8DecoderBase::Reset(uint16_t* buffer, unsigned buffer_length,
void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream, uint16_t* data,
- unsigned data_length) {
+ size_t data_length) {
while (data_length != 0) {
- unsigned cursor = 0;
+ size_t cursor = 0;
uint32_t character = Utf8::ValueOf(stream, Utf8::kMaxEncodedSize, &cursor);
// There's a total lack of bounds checking for stream
// as it was already done in Reset.
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
index 35ea30cf1a..bfb14a3855 100644
--- a/deps/v8/src/unicode-decoder.h
+++ b/deps/v8/src/unicode-decoder.h
@@ -14,32 +14,32 @@ class Utf8DecoderBase {
public:
// Initialization done in subclass.
inline Utf8DecoderBase();
- inline Utf8DecoderBase(uint16_t* buffer, unsigned buffer_length,
- const uint8_t* stream, unsigned stream_length);
- inline unsigned Utf16Length() const { return utf16_length_; }
+ inline Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
+ const uint8_t* stream, size_t stream_length);
+ inline size_t Utf16Length() const { return utf16_length_; }
protected:
// This reads all characters and sets the utf16_length_.
// The first buffer_length utf16 chars are cached in the buffer.
- void Reset(uint16_t* buffer, unsigned buffer_length, const uint8_t* stream,
- unsigned stream_length);
+ void Reset(uint16_t* buffer, size_t buffer_length, const uint8_t* stream,
+ size_t stream_length);
static void WriteUtf16Slow(const uint8_t* stream, uint16_t* data,
- unsigned length);
+ size_t length);
const uint8_t* unbuffered_start_;
- unsigned utf16_length_;
+ size_t utf16_length_;
bool last_byte_of_buffer_unused_;
private:
DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
};
-template <unsigned kBufferSize>
+template <size_t kBufferSize>
class Utf8Decoder : public Utf8DecoderBase {
public:
inline Utf8Decoder() {}
- inline Utf8Decoder(const char* stream, unsigned length);
- inline void Reset(const char* stream, unsigned length);
- inline unsigned WriteUtf16(uint16_t* data, unsigned length) const;
+ inline Utf8Decoder(const char* stream, size_t length);
+ inline void Reset(const char* stream, size_t length);
+ inline size_t WriteUtf16(uint16_t* data, size_t length) const;
private:
uint16_t buffer_[kBufferSize];
@@ -52,35 +52,34 @@ Utf8DecoderBase::Utf8DecoderBase()
last_byte_of_buffer_unused_(false) {}
-Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer, unsigned buffer_length,
- const uint8_t* stream,
- unsigned stream_length) {
+Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
+ const uint8_t* stream, size_t stream_length) {
Reset(buffer, buffer_length, stream, stream_length);
}
-template <unsigned kBufferSize>
-Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, unsigned length)
+template <size_t kBufferSize>
+Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, size_t length)
: Utf8DecoderBase(buffer_, kBufferSize,
reinterpret_cast<const uint8_t*>(stream), length) {}
-template <unsigned kBufferSize>
-void Utf8Decoder<kBufferSize>::Reset(const char* stream, unsigned length) {
+template <size_t kBufferSize>
+void Utf8Decoder<kBufferSize>::Reset(const char* stream, size_t length) {
Utf8DecoderBase::Reset(buffer_, kBufferSize,
reinterpret_cast<const uint8_t*>(stream), length);
}
-template <unsigned kBufferSize>
-unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
- unsigned length) const {
+template <size_t kBufferSize>
+size_t Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
+ size_t length) const {
DCHECK(length > 0);
if (length > utf16_length_) length = utf16_length_;
// memcpy everything in buffer.
- unsigned buffer_length =
+ size_t buffer_length =
last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
- unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
+ size_t memcpy_length = length <= buffer_length ? length : buffer_length;
v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t));
if (length <= buffer_length) return length;
DCHECK(unbuffered_start_ != NULL);
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index 0f78d39e06..b22e482528 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -110,7 +110,7 @@ unsigned Utf8::Encode(char* str,
}
-uchar Utf8::ValueOf(const byte* bytes, unsigned length, unsigned* cursor) {
+uchar Utf8::ValueOf(const byte* bytes, size_t length, size_t* cursor) {
if (length <= 0) return kBadChar;
byte first = bytes[0];
// Characters between 0000 and 0007F are encoded as a single character
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 26a336afd8..0d0d63d177 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -190,9 +190,7 @@ static int LookupMapping(const int32_t* table,
}
-uchar Utf8::CalculateValue(const byte* str,
- unsigned length,
- unsigned* cursor) {
+uchar Utf8::CalculateValue(const byte* str, size_t length, size_t* cursor) {
// We only get called for non-ASCII characters.
if (length == 1) {
*cursor += 1;
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 166681426f..7471a638c0 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -136,9 +136,7 @@ class Utf8 {
uchar c,
int previous,
bool replace_invalid = false);
- static uchar CalculateValue(const byte* str,
- unsigned length,
- unsigned* cursor);
+ static uchar CalculateValue(const byte* str, size_t length, size_t* cursor);
// The unicode replacement character, used to signal invalid unicode
// sequences (e.g. an orphan surrogate) when converting to a UTF-8 encoding.
@@ -156,9 +154,7 @@ class Utf8 {
// The maximum size a single UTF-16 code unit may take up when encoded as
// UTF-8.
static const unsigned kMax16BitCodeUnitSize = 3;
- static inline uchar ValueOf(const byte* str,
- unsigned length,
- unsigned* cursor);
+ static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
};
struct Uppercase {
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index 321eb3683d..b56ee84a33 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -49,7 +49,7 @@ class Unique {
// TODO(titzer): other immortable immovable objects are also fine.
DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
raw_address_ = reinterpret_cast<Address>(*handle);
- DCHECK_NE(raw_address_, NULL); // Non-null should imply non-zero address.
+ DCHECK_NOT_NULL(raw_address_); // Non-null should imply non-zero address.
}
handle_ = handle;
}
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 40c8b404fd..c6fe55b5c6 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -408,9 +408,9 @@ void init_memcopy_functions() {
bool DoubleToBoolean(double d) {
// NaN, +0, and -0 should return the false object
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if V8_TARGET_LITTLE_ENDIAN
union IeeeDoubleLittleEndianArchType u;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#else
union IeeeDoubleBigEndianArchType u;
#endif
u.d = d;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 525c6f87d3..1c9e3a6824 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -27,6 +27,9 @@ namespace internal {
// General helper functions
+inline int BoolToInt(bool b) { return b ? 1 : 0; }
+
+
// Same as strcmp, but can handle NULL arguments.
inline bool CStringEquals(const char* s1, const char* s2) {
return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
@@ -730,9 +733,8 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
// Compare 8bit/16bit chars to 8bit/16bit chars.
template <typename lchar, typename rchar>
-inline int CompareCharsUnsigned(const lchar* lhs,
- const rchar* rhs,
- int chars) {
+inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
+ size_t chars) {
const lchar* limit = lhs + chars;
if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
// memcmp compares byte-by-byte, yielding wrong results for two-byte
@@ -748,8 +750,8 @@ inline int CompareCharsUnsigned(const lchar* lhs,
return 0;
}
-template<typename lchar, typename rchar>
-inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+template <typename lchar, typename rchar>
+inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
DCHECK(sizeof(lchar) <= 2);
DCHECK(sizeof(rchar) <= 2);
if (sizeof(lchar) == 1) {
@@ -1314,24 +1316,30 @@ Vector<const char> ReadFile(FILE* file,
template <typename sourcechar, typename sinkchar>
-INLINE(static void CopyCharsUnsigned(sinkchar* dest,
- const sourcechar* src,
- int chars));
+INLINE(static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
+ size_t chars));
#if defined(V8_HOST_ARCH_ARM)
-INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
+ size_t chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars));
#elif defined(V8_HOST_ARCH_MIPS)
-INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
-INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars));
+#elif defined(V8_HOST_ARCH_PPC)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
+ size_t chars));
#endif
// Copy from 8bit/16bit chars to 8bit/16bit chars.
template <typename sourcechar, typename sinkchar>
-INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
+INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars));
-template<typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+template <typename sourcechar, typename sinkchar>
+void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
DCHECK(sizeof(sourcechar) <= 2);
DCHECK(sizeof(sinkchar) <= 2);
if (sizeof(sinkchar) == 1) {
@@ -1358,7 +1366,7 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
}
template <typename sourcechar, typename sinkchar>
-void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
+void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
sinkchar* limit = dest + chars;
if ((sizeof(*dest) == sizeof(*src)) &&
(chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
@@ -1370,7 +1378,7 @@ void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
#if defined(V8_HOST_ARCH_ARM)
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
switch (static_cast<unsigned>(chars)) {
case 0:
break;
@@ -1426,8 +1434,8 @@ void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
}
-void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
- if (chars >= kMinComplexConvertMemCopy) {
+void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, size_t chars) {
+ if (chars >= static_cast<size_t>(kMinComplexConvertMemCopy)) {
MemCopyUint16Uint8(dest, src, chars);
} else {
MemCopyUint16Uint8Wrapper(dest, src, chars);
@@ -1435,7 +1443,7 @@ void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
}
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
switch (static_cast<unsigned>(chars)) {
case 0:
break;
@@ -1468,7 +1476,7 @@ void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
#elif defined(V8_HOST_ARCH_MIPS)
-void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
if (chars < kMinComplexMemCopy) {
memcpy(dest, src, chars);
} else {
@@ -1476,13 +1484,143 @@ void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
}
}
-void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
if (chars < kMinComplexMemCopy) {
memcpy(dest, src, chars * sizeof(*dest));
} else {
MemCopy(dest, src, chars * sizeof(*dest));
}
}
+#elif defined(V8_HOST_ARCH_PPC)
+#define CASE(n) \
+ case n: \
+ memcpy(dest, src, n); \
+ break
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ CASE(17);
+ CASE(18);
+ CASE(19);
+ CASE(20);
+ CASE(21);
+ CASE(22);
+ CASE(23);
+ CASE(24);
+ CASE(25);
+ CASE(26);
+ CASE(27);
+ CASE(28);
+ CASE(29);
+ CASE(30);
+ CASE(31);
+ CASE(32);
+ CASE(33);
+ CASE(34);
+ CASE(35);
+ CASE(36);
+ CASE(37);
+ CASE(38);
+ CASE(39);
+ CASE(40);
+ CASE(41);
+ CASE(42);
+ CASE(43);
+ CASE(44);
+ CASE(45);
+ CASE(46);
+ CASE(47);
+ CASE(48);
+ CASE(49);
+ CASE(50);
+ CASE(51);
+ CASE(52);
+ CASE(53);
+ CASE(54);
+ CASE(55);
+ CASE(56);
+ CASE(57);
+ CASE(58);
+ CASE(59);
+ CASE(60);
+ CASE(61);
+ CASE(62);
+ CASE(63);
+ CASE(64);
+ default:
+ memcpy(dest, src, chars);
+ break;
+ }
+}
+#undef CASE
+
+#define CASE(n) \
+ case n: \
+ memcpy(dest, src, n * 2); \
+ break
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
+ switch (static_cast<unsigned>(chars)) {
+ case 0:
+ break;
+ case 1:
+ *dest = *src;
+ break;
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ CASE(17);
+ CASE(18);
+ CASE(19);
+ CASE(20);
+ CASE(21);
+ CASE(22);
+ CASE(23);
+ CASE(24);
+ CASE(25);
+ CASE(26);
+ CASE(27);
+ CASE(28);
+ CASE(29);
+ CASE(30);
+ CASE(31);
+ CASE(32);
+ default:
+ memcpy(dest, src, chars * 2);
+ break;
+ }
+}
+#undef CASE
#endif
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index b04ccc07c7..495921eeb1 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -8,7 +8,6 @@
#include "src/base/once.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
-#include "src/compiler/pipeline.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -50,7 +49,6 @@ void V8::TearDown() {
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
- compiler::Pipeline::TearDown();
ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
@@ -94,7 +92,6 @@ void V8::InitializeOncePerProcessImpl() {
#endif
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
- compiler::Pipeline::SetUp();
SetUpJSCallerSavedCodeData();
ExternalReference::SetUp();
Bootstrapper::InitializeOncePerProcess();
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 4267434e4b..211f3c6141 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -44,7 +44,6 @@
#include "src/log-inl.h" // NOLINT
#include "src/handles-inl.h" // NOLINT
#include "src/types-inl.h" // NOLINT
-#include "src/zone-inl.h" // NOLINT
namespace v8 {
namespace internal {
@@ -57,6 +56,7 @@ class V8 : public AllStatic {
static void TearDown();
// Report process out of memory. Implementation found in api.cc.
+ // This function will not return, but will terminate the execution.
static void FatalProcessOutOfMemory(const char* location,
bool take_snapshot = false);
@@ -74,7 +74,7 @@ class V8 : public AllStatic {
}
static void SetArrayBufferAllocator(v8::ArrayBuffer::Allocator *allocator) {
- CHECK_EQ(NULL, array_buffer_allocator_);
+ CHECK_NULL(array_buffer_allocator_);
array_buffer_allocator_ = allocator;
}
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index ac96f6391e..23143345f6 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -654,6 +654,15 @@ function Delete(obj, p, should_throw) {
}
+// ES6, draft 12-24-14, section 7.3.8
+function GetMethod(obj, p) {
+ var func = obj[p];
+ if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
+ if (IS_SPEC_FUNCTION(func)) return func;
+ throw MakeTypeError('called_non_callable', [typeof func]);
+}
+
+
// Harmony proxies.
function DefineProxyProperty(obj, p, attributes, should_throw) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 3b1559d864..2351e529a2 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -20,7 +20,6 @@ const char* Variable::Mode2String(VariableMode mode) {
case CONST_LEGACY: return "CONST_LEGACY";
case LET: return "LET";
case CONST: return "CONST";
- case MODULE: return "MODULE";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
@@ -35,7 +34,7 @@ const char* Variable::Mode2String(VariableMode mode) {
Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
bool is_valid_ref, Kind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag, Interface* interface)
+ MaybeAssignedFlag maybe_assigned_flag)
: scope_(scope),
name_(name),
mode_(mode),
@@ -48,8 +47,7 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
force_context_allocation_(false),
is_used_(false),
initialization_flag_(initialization_flag),
- maybe_assigned_(maybe_assigned_flag),
- interface_(interface) {
+ maybe_assigned_(maybe_assigned_flag) {
// Var declared variables never need initialization.
DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
}
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 93bfb4a181..1adeb1f0f4 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -6,7 +6,6 @@
#define V8_VARIABLES_H_
#include "src/ast-value-factory.h"
-#include "src/interface.h"
#include "src/zone.h"
namespace v8 {
@@ -19,11 +18,7 @@ namespace internal {
class Variable: public ZoneObject {
public:
- enum Kind {
- NORMAL,
- THIS,
- ARGUMENTS
- };
+ enum Kind { NORMAL, THIS, NEW_TARGET, ARGUMENTS };
enum Location {
// Before and during variable allocation, a variable whose location is
@@ -54,8 +49,7 @@ class Variable: public ZoneObject {
Variable(Scope* scope, const AstRawString* name, VariableMode mode,
bool is_valid_ref, Kind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- Interface* interface = Interface::NewValue());
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// Printing support
static const char* Mode2String(VariableMode mode);
@@ -105,6 +99,7 @@ class Variable: public ZoneObject {
}
bool is_this() const { return kind_ == THIS; }
+ bool is_new_target() const { return kind_ == NEW_TARGET; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
// True if the variable is named eval and not known to be shadowed.
@@ -126,7 +121,6 @@ class Variable: public ZoneObject {
InitializationFlag initialization_flag() const {
return initialization_flag_;
}
- Interface* interface() const { return interface_; }
void AllocateTo(Location location, int index) {
location_ = location;
@@ -158,9 +152,6 @@ class Variable: public ZoneObject {
bool is_used_;
InitializationFlag initialization_flag_;
MaybeAssignedFlag maybe_assigned_;
-
- // Module type info.
- Interface* interface_;
};
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index a4fdb1060e..895c61b4ec 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -31,9 +31,9 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- SLOW_DCHECK(to <= length_);
- SLOW_DCHECK(from < to);
DCHECK(0 <= from);
+ SLOW_DCHECK(from < to);
+ SLOW_DCHECK(static_cast<unsigned>(to) <= static_cast<unsigned>(length_));
return Vector<T>(start() + from, to - from);
}
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index caf7af6ec9..64c71cfaee 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -292,7 +292,7 @@ void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += static_cast<int32_t>(delta);
+ Memory::Address_at(pc_) += delta;
if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
@@ -621,7 +621,12 @@ void Operand::set_disp32(int disp) {
len_ += sizeof(int32_t);
}
-
+void Operand::set_disp64(int64_t disp) {
+ DCHECK_EQ(1, len_);
+ int64_t* p = reinterpret_cast<int64_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(disp);
+}
} } // namespace v8::internal
#endif // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index fd722b23bd..dcfa01fd0e 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -4,12 +4,17 @@
#include "src/x64/assembler-x64.h"
+#include <cstring>
+
+#if V8_TARGET_ARCH_X64
+
+#if V8_LIBC_MSVCRT
+#include <intrin.h> // _xgetbv()
+#endif
#if V8_OS_MACOSX
#include <sys/sysctl.h>
#endif
-#if V8_TARGET_ARCH_X64
-
#include "src/base/bits.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@@ -22,22 +27,44 @@ namespace internal {
namespace {
-bool EnableAVX() {
+#if !V8_LIBC_MSVCRT
+
+V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
+ unsigned eax, edx;
+ // Check xgetbv; this uses a .byte sequence instead of the instruction
+ // directly because older assemblers do not include support for xgetbv and
+ // there is no easy way to conditionally compile based on the assembler
+ // used.
+ __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
+}
+
+#define _XCR_XFEATURE_ENABLED_MASK 0
+
+#endif // !V8_LIBC_MSVCRT
+
+
+bool OSHasAVXSupport() {
#if V8_OS_MACOSX
- // Mac OS X 10.9 has a bug where AVX transitions were indeed being caused by
- // ISRs, so we detect Mac OS X 10.9 here and disable AVX in that case.
+ // Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
+ // caused by ISRs, so we detect that here and disable AVX in that case.
char buffer[128];
size_t buffer_size = arraysize(buffer);
- int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+ int ctl_name[] = {CTL_KERN, KERN_OSRELEASE};
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
- // XX is the major kernel version component. 13.x.x (Mavericks) is
- // affected by this bug, so disable AVX there.
- if (memcmp(buffer, "13.", 3) == 0) return false;
+ // XX is the major kernel version component.
+ char* period_pos = strchr(buffer, '.');
+ DCHECK_NOT_NULL(period_pos);
+ *period_pos = '\0';
+ long kernel_version_major = strtol(buffer, nullptr, 10); // NOLINT
+ if (kernel_version_major <= 13) return false;
#endif // V8_OS_MACOSX
- return FLAG_enable_avx;
+ // Check whether OS claims to support AVX.
+ uint64_t feature_mask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+ return (feature_mask & 0x6) == 0x6;
}
} // namespace
@@ -55,17 +82,28 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
// SAHF is not generally available in long mode.
if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
- if (cpu.has_avx() && EnableAVX()) supported_ |= 1u << AVX;
- if (cpu.has_fma3() && FLAG_enable_fma3) supported_ |= 1u << FMA3;
+ if (cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
+ OSHasAVXSupport()) {
+ supported_ |= 1u << AVX;
+ }
+ if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() &&
+ OSHasAVXSupport()) {
+ supported_ |= 1u << FMA3;
+ }
+ if (strcmp(FLAG_mcpu, "auto") == 0) {
+ if (cpu.is_atom()) supported_ |= 1u << ATOM;
+ } else if (strcmp(FLAG_mcpu, "atom") == 0) {
+ supported_ |= 1u << ATOM;
+ }
}
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() {
- printf("SSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d\n",
+ printf("SSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d ATOM=%d\n",
CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSE4_1),
CpuFeatures::IsSupported(SAHF), CpuFeatures::IsSupported(AVX),
- CpuFeatures::IsSupported(FMA3));
+ CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(ATOM));
}
@@ -181,6 +219,13 @@ Operand::Operand(Register index,
}
+Operand::Operand(Label* label) : rex_(0), len_(1) {
+ DCHECK_NOT_NULL(label);
+ set_modrm(0, rbp);
+ set_disp64(reinterpret_cast<intptr_t>(label));
+}
+
+
Operand::Operand(const Operand& operand, int32_t offset) {
DCHECK(operand.len_ >= 1);
// Operand encodes REX ModR/M [SIB] [Disp].
@@ -287,6 +332,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
+ reloc_info_writer.Finish();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -327,15 +373,30 @@ void Assembler::bind_to(Label* L, int pos) {
int current = L->pos();
int next = long_at(current);
while (next != current) {
- // Relative address, relative to point after address.
- int imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, imm32);
+ if (current >= 4 && long_at(current - 4) == 0) {
+ // Absolute address.
+ intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_ + pos);
+ *reinterpret_cast<intptr_t*>(addr_at(current - 4)) = imm64;
+ internal_reference_positions_.push_back(current - 4);
+ } else {
+ // Relative address, relative to point after address.
+ int imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, imm32);
+ }
current = next;
next = long_at(next);
}
// Fix up last fixup on linked list.
- int last_imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, last_imm32);
+ if (current >= 4 && long_at(current - 4) == 0) {
+ // Absolute address.
+ intptr_t imm64 = reinterpret_cast<intptr_t>(buffer_ + pos);
+ *reinterpret_cast<intptr_t*>(addr_at(current - 4)) = imm64;
+ internal_reference_positions_.push_back(current - 4);
+ } else {
+ // Relative address, relative to point after address.
+ int imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, imm32);
+ }
}
while (L->is_near_linked()) {
int fixup_pos = L->near_link_pos();
@@ -403,15 +464,10 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
+ *p += pc_delta;
}
DCHECK(!buffer_overflow());
@@ -425,11 +481,29 @@ void Assembler::emit_operand(int code, const Operand& adr) {
// Emit updated ModR/M byte containing the given register.
DCHECK((adr.buf_[0] & 0x38) == 0);
- pc_[0] = adr.buf_[0] | code << 3;
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
+ *pc_++ = adr.buf_[0] | code << 3;
+
+ // Recognize RIP relative addressing.
+ if (adr.buf_[0] == 5) {
+ DCHECK_EQ(9u, length);
+ Label* label = *bit_cast<Label* const*>(&adr.buf_[1]);
+ if (label->is_bound()) {
+ int offset = label->pos() - pc_offset() - sizeof(int32_t);
+ DCHECK_GE(0, offset);
+ emitl(offset);
+ } else if (label->is_linked()) {
+ emitl(label->pos());
+ label->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ DCHECK(label->is_unused());
+ int32_t current = pc_offset();
+ emitl(current);
+ label->link_to(current);
+ }
+ } else {
+ // Emit the rest of the encoded operand.
+ for (unsigned i = 1; i < length; i++) *pc_++ = adr.buf_[i];
+ }
}
@@ -1777,6 +1851,13 @@ void Assembler::ret(int imm16) {
}
+void Assembler::ud2() {
+ EnsureSpace ensure_space(this);
+ emit(0x0F);
+ emit(0x0B);
+}
+
+
void Assembler::setcc(Condition cc, Register reg) {
if (cc > last_condition) {
movb(reg, Immediate(cc == always ? 1 : 0));
@@ -3309,6 +3390,27 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dq(Label* label) {
+ EnsureSpace ensure_space(this);
+ if (label->is_bound()) {
+ internal_reference_positions_.push_back(pc_offset());
+ emitp(buffer_ + label->pos(), RelocInfo::INTERNAL_REFERENCE);
+ } else {
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ emitl(0); // Zero for the first 32bit marks it as 64bit absolute address.
+ if (label->is_linked()) {
+ emitl(label->pos());
+ label->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ DCHECK(label->is_unused());
+ int32_t current = pc_offset();
+ emitl(current);
+ label->link_to(current);
+ }
+ }
+}
+
+
// Relocation information implementations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@@ -3326,28 +3428,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 2ebae3bbe0..116816c872 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -37,6 +37,9 @@
#ifndef V8_X64_ASSEMBLER_X64_H_
#define V8_X64_ASSEMBLER_X64_H_
+#include <deque>
+
+#include "src/assembler.h"
#include "src/serialize.h"
namespace v8 {
@@ -408,6 +411,9 @@ class Operand BASE_EMBEDDED {
// this must not overflow.
Operand(const Operand& base, int32_t offset);
+ // [rip + disp/r]
+ explicit Operand(Label* label);
+
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
@@ -421,7 +427,7 @@ class Operand BASE_EMBEDDED {
private:
byte rex_;
- byte buf_[6];
+ byte buf_[9];
// The number of bytes of buf_ in use.
byte len_;
@@ -437,6 +443,7 @@ class Operand BASE_EMBEDDED {
// Needs to be called after set_sib, not before it.
inline void set_disp8(int disp);
inline void set_disp32(int disp);
+ inline void set_disp64(int64_t disp); // for labels.
friend class Assembler;
};
@@ -888,6 +895,7 @@ class Assembler : public AssemblerBase {
void int3();
void nop();
void ret(int imm16);
+ void ud2();
void setcc(Condition cc, Register reg);
// Label operations & relative jumps (PPUM Appendix D)
@@ -934,6 +942,7 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
+ void jmp(const Operand& src);
// Conditional jumps
void j(Condition cc,
@@ -1328,7 +1337,11 @@ class Assembler : public AssemblerBase {
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
- void RecordComment(const char* msg, bool force = false);
+ void RecordComment(const char* msg);
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
@@ -1340,6 +1353,7 @@ class Assembler : public AssemblerBase {
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dq(Label* label);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
@@ -1367,9 +1381,6 @@ class Assembler : public AssemblerBase {
// Call near indirect
void call(const Operand& operand);
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1807,6 +1818,11 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
+ // Internal reference positions, required for (potential) patching in
+ // GrowBuffer(); contains only those internal references whose labels
+ // are already bound.
+ std::deque<int> internal_reference_positions_;
+
List< Handle<Code> > code_targets_;
PositionsRecorder positions_recorder_;
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index ef3df659eb..f43084b13f 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -99,6 +99,41 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize * 2));
+ __ Push(rdi);
+ offset = kPointerSize;
+ }
+
+ // Must restore rsi (context) and rdi (constructor) before calling runtime.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi);
+ __ Push(original_constructor);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ movp(rbx, rax); // store result in rbx
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ if (create_memento) {
+ __ jmp(count_incremented);
+ } else {
+ __ jmp(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -106,6 +141,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- rax: number of arguments
// -- rdi: constructor function
// -- rbx: allocation site or undefined
+ // -- rdx: original constructor
// -----------------------------------
// Should never create mementos for api functions.
@@ -127,9 +163,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push the function to invoke on the stack.
__ Push(rdi);
+ Label rt_call, normal_new, allocated, count_incremented;
+ __ cmpp(rdx, rdi);
+ __ j(equal, &normal_new);
+
+ Generate_Runtime_NewObject(masm, create_memento, rdx, &count_incremented,
+ &allocated);
+
+ __ bind(&normal_new);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
@@ -345,32 +388,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
- int offset = 0;
- if (create_memento) {
- // Get the cell or allocation site.
- __ movp(rdi, Operand(rsp, kPointerSize*2));
- __ Push(rdi);
- offset = kPointerSize;
- }
-
- // Must restore rsi (context) and rdi (constructor) before calling runtime.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movp(rdi, Operand(rsp, offset));
- __ Push(rdi);
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ movp(rbx, rax); // store result in rbx
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
+ Generate_Runtime_NewObject(masm, create_memento, rdi, &count_incremented,
+ &allocated);
// New object allocated.
// rbx: newly allocated object
@@ -479,6 +498,81 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax: number of arguments
+ // -- rdi: constructor function
+ // -- rbx: allocation site or undefined
+ // -- rdx: original constructor
+ // -----------------------------------
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ // Store a smi-tagged arguments count on the stack.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ SmiToInteger32(rax, rax);
+
+ // Push new.target
+ __ Push(rdx);
+
+ // receiver is the hole.
+ __ Push(masm->isolate()->factory()->the_hole_value());
+
+ // Set up pointer to last argument.
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ movp(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ decp(rcx);
+ __ j(greater_equal, &loop);
+
+ __ incp(rax); // Pushed new.target.
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ Move(kScratchRegister, debug_step_in_fp);
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
+ __ j(equal, &skip_step_in);
+
+ __ Push(rax);
+ __ Push(rdi);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ Pop(rdi);
+ __ Pop(rax);
+
+ __ bind(&skip_step_in);
+
+ // Call the function.
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ __ movp(rbx, Operand(rsp, 0)); // Get arguments count.
+ } // Leave construct frame.
+
+ // Remove caller arguments from the stack and return.
+ __ PopReturnAddressTo(rcx);
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ ret(0);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -1178,6 +1272,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
}
+ __ movp(rdx, rdi);
// Run the native code for the Array function called as a normal function.
// tail call a stub
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index f327b50085..2a28767010 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -539,6 +539,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The key is in rdx and the parameter count is in rax.
DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));
@@ -606,6 +607,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
+ CHECK(!has_new_target());
+
Factory* factory = isolate()->factory();
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
@@ -819,6 +822,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
// rsp[24] : function
+ CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -841,6 +845,33 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : index of rest parameter
+ // rsp[16] : number of parameters
+ // rsp[24] : receiver displacement
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(1), rcx);
+ __ SmiToInteger64(rcx, rcx);
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ movp(args.GetArgumentOperand(0), rdx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is on the stack.
Label slow;
@@ -928,6 +959,13 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ if (has_new_target()) {
+ // Subtract 1 from smi-tagged arguments count.
+ __ SmiToInteger32(rcx, rcx);
+ __ decl(rcx);
+ __ Integer32ToSmi(rcx, rcx);
+ }
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ leap(rdx, Operand(rdx, rcx, times_pointer_size,
@@ -2017,6 +2055,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(rbx);
}
+ // Pass original constructor to construct stub.
+ if (IsSuperConstructorCall()) {
+ __ movp(rdx, Operand(rsp, rax, times_pointer_size, 2 * kPointerSize));
+ } else {
+ __ movp(rdx, rdi);
+ }
+
// Jump to the function-specific construct stub.
Register jmp_reg = rcx;
__ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -2056,11 +2101,11 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// rdi - function
// rdx - slot id (as integer)
+ // rbx - vector
Label miss;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, rbx);
__ SmiToInteger32(rdx, rdx);
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
@@ -2077,6 +2122,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ movp(rbx, rcx);
+ __ movp(rdx, rdi);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -2097,6 +2143,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// rdi - function
// rdx - slot id
+ // rbx - vector
Isolate* isolate = masm->isolate();
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
@@ -2109,14 +2156,32 @@ void CallICStub::Generate(MacroAssembler* masm) {
StackArgumentsAccessor args(rsp, argc);
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, rbx);
-
// The checks. First, does rdi match the recorded monomorphic target?
__ SmiToInteger32(rdx, rdx);
- __ cmpp(rdi, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
+ __ movp(rcx,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ cmpp(rdi, FieldOperand(rcx, WeakCell::kValueOffset));
__ j(not_equal, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(rdi, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -2145,8 +2210,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&extra_checks_or_miss);
Label uninitialized, miss;
- __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
__ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
__ j(equal, &slow_start);
@@ -2189,14 +2252,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Update stats.
__ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
- // Store the function.
- __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
- rdi);
+ // Store the function. Use a stub since we need a frame for allocation.
+ // rbx - vector
+ // rdx - slot (needs to be in smi form)
+ // rdi - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(isolate);
+
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdi);
+ __ CallStub(&create_stub);
+ __ Pop(rdi);
+ }
- // Update the write barrier.
- __ movp(rax, rdi);
- __ RecordWriteArray(rbx, rax, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
@@ -2219,30 +2288,23 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movp(rcx, Operand(rsp, (arg_count() + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
- __ Push(rcx);
- __ Push(rdi);
- __ Push(rbx);
- __ Integer32ToSmi(rdx, rdx);
- __ Push(rdx);
+ // Push the receiver and the function and feedback info.
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to edi and exit the internal frame.
- __ movp(rdi, rax);
- }
+ // Move result to edi and exit the internal frame.
+ __ movp(rdi, rax);
}
@@ -2258,6 +2320,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -3716,14 +3779,16 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
+ __ GetWeakValue(rdi, cell);
__ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
__ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ Cmp(rcx, known_map_);
+ __ cmpp(rcx, rdi);
__ j(not_equal, &miss, Label::kNear);
- __ Cmp(rbx, known_map_);
+ __ cmpp(rbx, rdi);
__ j(not_equal, &miss, Label::kNear);
__ subp(rax, rdx);
@@ -4263,6 +4328,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, rbx);
+ CallICStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, rbx);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4495,6 +4574,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- rax : argc
// -- rbx : AllocationSite or undefined
// -- rdi : constructor
+ // -- rdx : original constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -4515,6 +4595,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(rbx);
}
+ Label subclassing;
+ __ cmpp(rdi, rdx);
+ __ j(not_equal, &subclassing);
+
Label no_info;
// If the feedback vector is the undefined value call an array constructor
// that doesn't use AllocationSites.
@@ -4530,6 +4614,31 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ // Subclassing
+ __ bind(&subclassing);
+ __ Pop(rcx); // return address.
+ __ Push(rdi);
+ __ Push(rdx);
+
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ addp(rax, Immediate(2));
+ break;
+ case NONE:
+ __ movp(rax, Immediate(2));
+ break;
+ case ONE:
+ __ movp(rax, Immediate(3));
+ break;
+ }
+
+ __ Push(rcx);
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()),
+ 1);
}
@@ -4621,14 +4730,205 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+static int Offset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ // Check that fits into int.
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
+
+
+// Prepares stack to put arguments (aligns and so on). WIN64 calling
+// convention requires to put the pointer to the return value slot into
+// rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves
+// context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
+// inside the exit frame (not GCed) accessible via StackSpaceOperand.
+static void PrepareCallApiFunction(MacroAssembler* masm, int arg_stack_space) {
+ __ EnterApiExitFrame(arg_stack_space);
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Clobbers r14, r15, rbx and
+// caller-save registers. Restores context. On return removes
+// stack_space * kPointerSize (GCed).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg, int stack_space,
+ Operand* stack_space_operand,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
+ Label prologue;
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label write_back;
+
+ Isolate* isolate = masm->isolate();
+ Factory* factory = isolate->factory();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = Offset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = Offset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address(isolate);
+
+ DCHECK(rdx.is(function_address) || r8.is(function_address));
+ // Allocate HandleScope in callee-save registers.
+ Register prev_next_address_reg = r14;
+ Register prev_limit_reg = rbx;
+ Register base_reg = r15;
+ __ Move(base_reg, next_address);
+ __ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ __ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ __ addl(Operand(base_reg, kLevelOffset), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ Move(rax, ExternalReference::is_profiling_address(isolate));
+ __ cmpb(Operand(rax, 0), Immediate(0));
+ __ j(zero, &profiler_disabled);
+
+ // Third parameter is the address of the actual getter function.
+ __ Move(thunk_last_arg, function_address);
+ __ Move(rax, thunk_ref);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ // Call the api function!
+ __ Move(rax, function_address);
+
+ __ bind(&end_profiler_check);
+
+ // Call the api function!
+ __ call(rax);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ // Load the value from ReturnValue
+ __ movp(rax, return_value_operand);
+ __ bind(&prologue);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ subl(Operand(base_reg, kLevelOffset), Immediate(1));
+ __ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ __ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ __ j(not_equal, &delete_allocated_handles);
+ __ bind(&leave_exit_frame);
+
+ // Check if the function scheduled an exception.
+ __ Move(rsi, scheduled_exception_address);
+ __ Cmp(Operand(rsi, 0), factory->the_hole_value());
+ __ j(not_equal, &promote_scheduled_exception);
+ __ bind(&exception_handled);
+
+#if DEBUG
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = rax;
+ Register map = rcx;
+
+ __ JumpIfSmi(return_value, &ok, Label::kNear);
+ __ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ __ CmpInstanceType(map, LAST_NAME_TYPE);
+ __ j(below_equal, &ok, Label::kNear);
+
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &ok, Label::kNear);
+
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, Heap::kTrueValueRootIndex);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, Heap::kFalseValueRootIndex);
+ __ j(equal, &ok, Label::kNear);
+
+ __ CompareRoot(return_value, Heap::kNullValueRootIndex);
+ __ j(equal, &ok, Label::kNear);
+
+ __ Abort(kAPICallReturnedInvalidObject);
+
+ __ bind(&ok);
+#endif
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ movp(rsi, *context_restore_operand);
+ }
+ if (stack_space_operand != nullptr) {
+ __ movp(rbx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame(!restore_context);
+ if (stack_space_operand != nullptr) {
+ DCHECK_EQ(stack_space, 0);
+ __ PopReturnAddressTo(rcx);
+ __ addq(rsp, rbx);
+ __ jmp(rcx);
+ } else {
+ __ ret(stack_space * kPointerSize);
+ }
+
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ __ movp(prev_limit_reg, rax);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
+ __ LoadAddress(rax,
+ ExternalReference::delete_handle_scope_extensions(isolate));
+ __ call(rax);
+ __ movp(rax, prev_limit_reg);
+ __ jmp(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
- // -- rax : callee
+ // -- rdi : callee
// -- rbx : call_data
// -- rcx : holder
// -- rdx : api_function_address
// -- rsi : context
- // --
+ // -- rax : number of arguments if argc is a register
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -- ...
@@ -4636,16 +4936,12 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- Register callee = rax;
+ Register callee = rdi;
Register call_data = rbx;
Register holder = rcx;
Register api_function_address = rdx;
- Register return_address = rdi;
Register context = rsi;
-
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
+ Register return_address = r8;
typedef FunctionCallbackArguments FCA;
@@ -4658,12 +4954,12 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
+ DCHECK(argc.is_immediate() || rax.is(argc.reg()));
+
__ PopReturnAddressTo(return_address);
// context save
__ Push(context);
- // load context from callee
- __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
// callee
__ Push(callee);
@@ -4679,8 +4975,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// return value default
__ Push(scratch);
// isolate
- __ Move(scratch,
- ExternalReference::isolate_address(isolate()));
+ __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch);
// holder
__ Push(holder);
@@ -4689,19 +4984,38 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// Push return address back on stack.
__ PushReturnAddressFrom(return_address);
+ // load context from callee
+ __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiStackSpace);
+ PrepareCallApiFunction(masm, kApiStackSpace);
// FunctionCallbackInfo::implicit_args_.
__ movp(StackSpaceOperand(0), scratch);
- __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
- __ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_.
- __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
+ if (argc.is_immediate()) {
+ __ addp(scratch, Immediate((argc.immediate() + FCA::kArgsLength - 1) *
+ kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ movp(StackSpaceOperand(1), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Set(StackSpaceOperand(2), argc.immediate());
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(StackSpaceOperand(3), 0);
+ } else {
+ __ leap(scratch, Operand(scratch, argc.reg(), times_pointer_size,
+ (FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ movp(StackSpaceOperand(1), scratch);
+ // FunctionCallbackInfo::length_.
+ __ movp(StackSpaceOperand(2), argc.reg());
+ // FunctionCallbackInfo::is_construct_call_.
+ __ leap(argc.reg(), Operand(argc.reg(), times_pointer_size,
+ (FCA::kArgsLength + 1) * kPointerSize));
+ __ movp(StackSpaceOperand(3), argc.reg());
+ }
#if defined(__MINGW64__) || defined(_WIN64)
Register arguments_arg = rcx;
@@ -4719,23 +5033,41 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ leap(arguments_arg, StackSpaceOperand(0));
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
// Accessor for FunctionCallbackInfo and first js arg.
StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
FCA::kArgsLength - FCA::kContextSaveIndex);
- // Stores return the first js argument
+ Operand is_construct_call_operand = StackSpaceOperand(3);
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
- __ CallApiFunctionAndReturn(
- api_function_address,
- thunk_ref,
- callback_arg,
- argc + FCA::kArgsLength + 1,
- return_value_operand,
- &context_restore_operand);
+ return_first_arg ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
+ int stack_space = 0;
+ Operand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = nullptr;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
+ stack_space, stack_space_operand,
+ return_value_operand, &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(rax), false,
+ call_data_undefined);
+}
+
+
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -4769,7 +5101,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ leap(name_arg, Operand(rsp, kPCOnStackSize));
- __ PrepareCallApiFunction(kArgStackSpace);
+ PrepareCallApiFunction(masm, kArgStackSpace);
__ leap(scratch, Operand(name_arg, 1 * kPointerSize));
// v8::PropertyAccessorInfo::args_.
@@ -4792,12 +5124,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Operand return_value_operand = args.GetArgumentOperand(
PropertyCallbackArguments::kArgsLength - 1 -
PropertyCallbackArguments::kReturnValueOffset);
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- getter_arg,
- kStackSpace,
- return_value_operand,
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
+ kStackSpace, nullptr, return_value_operand, NULL);
}
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 54fe9b0cfe..2fc61814c8 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -160,6 +160,8 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
kDoubleRegsSize;
+ __ Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
+
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
// this on linux), since it is another parameter passing register on windows.
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 75adc8988f..bed99d101a 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -503,7 +503,7 @@ int DisassemblerX64::PrintRightOperandHelper(
case 0:
if ((rm & 7) == 5) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[0x%x]", disp);
+ AppendToBuffer("[rip+0x%x]", disp);
return 5;
} else if ((rm & 7) == 4) {
// Codes for SIB byte.
@@ -803,7 +803,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) {
UnimplementedInstruction();
return count + 1;
}
- DCHECK_NE(NULL, mnem);
+ DCHECK_NOT_NULL(mnem);
AppendToBuffer("%s%c ", mnem, operand_size_code());
}
count += PrintRightOperand(data + count);
@@ -1500,6 +1500,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
+ } else if (opcode == 0x0B) {
+ AppendToBuffer("ud2");
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 24747ee9e8..a64f504282 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -114,7 +114,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
// +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
@@ -143,7 +143,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -189,7 +189,7 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in rdi.
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ Push(rdi);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -232,6 +232,26 @@ void FullCodeGenerator::Generate() {
}
}
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ leap(rdx,
+ Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ Push(rdx);
+ __ Push(Smi::FromInt(num_parameters));
+ __ Push(Smi::FromInt(rest_index));
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, rax, rbx, rdx);
+ }
+
// Possibly allocate an arguments object.
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
@@ -254,15 +274,20 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
+
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
@@ -416,7 +441,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ popq(rbp);
int no_frame_start = masm_->pc_offset();
- int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, rcx);
// Add padding that will be overwritten by a debugger breakpoint. We
@@ -888,15 +917,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(rax, scope_->ContextChainLength(scope_->ScriptScope()));
- __ movp(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movp(rax, ContextOperand(rax, descriptor->Index()));
__ movp(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
// Assign it.
@@ -1218,6 +1248,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1255,7 +1286,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
@@ -1495,6 +1526,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1651,11 +1687,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ Push(rax); // Save result on the stack
@@ -1696,7 +1734,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
if (property->emit_store()) {
EmitSetHomeObjectIfNeeded(value, 2);
- __ Push(Smi::FromInt(SLOPPY)); // Strict mode
+ __ Push(Smi::FromInt(SLOPPY)); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
@@ -1705,17 +1743,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::PROTOTYPE:
__ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- __ Drop(2);
- }
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1735,6 +1774,65 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ Push(rax); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ Push(Operand(rsp, 0));
@@ -1790,6 +1888,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1929,19 +2028,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ Push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
- mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
PrepareForBailout(expr->binary_operation(), TOS_REG);
@@ -2321,7 +2416,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left,
Expression* right) {
// Do combined smi check of the operands. Left operand is on the
@@ -2336,7 +2430,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movp(rax, rcx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2393,33 +2487,34 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ Push(Operand(rsp, kPointerSize)); // constructor
} else {
__ Push(Operand(rsp, 0)); // prototype
}
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ Push(Smi::FromInt(DONT_ENUM));
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ Push(Smi::FromInt(DONT_ENUM));
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
default:
@@ -2435,11 +2530,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ Pop(rdx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2514,7 +2607,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Pop(StoreDescriptor::ReceiverRegister());
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2581,7 +2674,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Push(rax); // Value.
__ Push(rsi); // Context.
__ Push(var->name());
- __ Push(Smi::FromInt(strict_mode()));
+ __ Push(Smi::FromInt(language_mode()));
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
@@ -2596,7 +2689,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
@@ -2629,8 +2722,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(rax);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2642,9 +2735,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(rax);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2656,7 +2750,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2680,8 +2775,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(rax);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2696,8 +2789,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(rax);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(rax);
}
@@ -2846,9 +2940,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ Move(rdx, SmiFromSlot(expr->CallFeedbackSlot()));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2879,7 +2972,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Push(args.GetReceiverOperand());
// Push the language mode.
- __ Push(Smi::FromInt(strict_mode()));
+ __ Push(Smi::FromInt(language_mode()));
// Push the start position of the scope the calls resides in.
__ Push(Smi::FromInt(scope()->start_position()));
@@ -2889,8 +2982,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kGetPrototype, 1);
}
@@ -3004,11 +3096,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ Push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3036,12 +3124,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ Push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3075,6 +3159,66 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ if (!ValidateSuperCall(expr)) return;
+
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ Push(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into edi and eax.
+ __ Set(rax, arg_count);
+ __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, SmiFromSlot(expr->CallFeedbackSlot()));
+
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ RecordJSReturnSite(expr);
+
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(rcx, this_var);
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ j(equal, &uninitialized_this);
+ __ Push(this_var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3602,7 +3746,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3936,6 +4080,56 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ Push(result_register());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ movp(rax, Immediate(0));
+ __ jmp(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger64(rcx, rcx);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ subp(rcx, Immediate(1));
+ __ movp(rax, rcx);
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ Label loop;
+ __ bind(&loop);
+ __ Push(Operand(rdx, -1 * kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
+ __ decp(rcx);
+ __ j(not_zero, &loop);
+ }
+
+ __ bind(&args_set_up);
+ __ movp(rdi, Operand(rsp, rax, times_pointer_size, 0));
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(result_register());
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
@@ -3954,7 +4148,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4411,14 +4605,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Push(Smi::FromInt(strict_mode()));
+ __ Push(Smi::FromInt(language_mode()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ Push(GlobalObjectOperand());
__ Push(var->name());
@@ -4636,6 +4830,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4670,8 +4865,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
- NO_OVERWRITE).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4741,7 +4936,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::NameRegister());
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index f19979d467..1ca0c85877 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -101,7 +101,19 @@ void FastCloneShallowObjectDescriptor::Initialize(
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {rsi, rbx, rdx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rbx, rdx, rdi};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -128,6 +140,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rdi, rdx, rbx};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// rax : number of arguments
// rbx : feedback vector
@@ -297,7 +319,28 @@ void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {
rsi, // context
- rax, // callee
+ rdi, // callee
+ rbx, // call_data
+ rcx, // holder
+ rdx, // api_function_address
+ rax, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rsi, // context
+ rdi, // callee
rbx, // call_data
rcx, // holder
rdx, // api_function_address
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 10f2bb8cdd..f4d75775bb 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -67,7 +67,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
}
@@ -129,8 +128,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
@@ -306,7 +304,7 @@ bool LCodeGen::GenerateJumpTable() {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
@@ -725,7 +723,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -770,21 +768,22 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cc == no_condition && frame_is_built_ &&
!info()->saves_caller_doubles()) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -798,11 +797,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, detail, bailout_type);
+ DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}
@@ -824,6 +823,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1033,7 +1033,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@@ -1050,7 +1050,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1065,7 +1065,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
- DeoptimizeIf(less, instr, "minus zero");
+ DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1087,7 +1087,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1098,7 +1098,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, "minus zero");
+ DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
@@ -1118,7 +1118,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1144,13 +1144,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
return;
}
@@ -1177,7 +1177,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1185,7 +1185,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1232,7 +1232,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1241,7 +1241,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1251,7 +1251,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1280,19 +1280,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, "lost precision");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1313,7 +1313,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1321,7 +1321,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1331,7 +1331,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
- DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1351,7 +1351,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1360,7 +1360,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1370,7 +1370,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1381,7 +1381,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
- DeoptimizeIf(not_zero, instr, "lost precision");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1458,7 +1458,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1477,10 +1477,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr, "minus zero");
+ DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1488,7 +1488,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1496,7 +1496,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -1609,7 +1609,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, "negative value");
+ DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
@@ -1638,7 +1638,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, "negative value");
+ DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
@@ -1653,7 +1653,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@@ -1696,7 +1696,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
@@ -1717,19 +1717,7 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
- DCHECK(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- double v = instr->value();
- uint64_t int_val = bit_cast<uint64_t, double>(v);
- // Use xor to produce +0.0 in a fast and compact way, but avoid to
- // do so if the constant is -0.0.
- if (int_val == 0) {
- __ xorps(res, res);
- } else {
- Register tmp = ToRegister(instr->temp());
- __ Set(tmp, int_val);
- __ movq(res, tmp);
- }
+ __ Move(ToDoubleRegister(instr->result()), instr->bits());
}
@@ -1761,9 +1749,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(rax));
Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr, "Smi");
+ DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr, "not a date object");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1927,7 +1915,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
}
@@ -2015,23 +2003,45 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
- // All operations except MOD are computed in-place.
- DCHECK(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
- __ addsd(left, right);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vaddsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ addsd(left, right);
+ }
break;
case Token::SUB:
- __ subsd(left, right);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vsubsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ subsd(left, right);
+ }
break;
case Token::MUL:
- __ mulsd(left, right);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vmulsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ mulsd(left, right);
+ }
break;
case Token::DIV:
- __ divsd(left, right);
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a mulsd depending on the result
- __ movaps(left, left);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(masm(), AVX);
+ __ vdivsd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ divsd(left, right);
+ // Don't delete this mov. It may improve performance on some CPUs,
+ // when there is a mulsd depending on the result
+ __ movaps(left, left);
+ }
break;
case Token::MOD: {
XMMRegister xmm_scratch = double_scratch0();
@@ -2056,8 +2066,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(rax));
DCHECK(ToRegister(instr->result()).is(rax));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2172,7 +2181,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
}
const Register map = kScratchRegister;
@@ -2226,7 +2235,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, "unexpected object");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -2843,7 +2852,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
}
@@ -2896,7 +2905,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
DCHECK(!value.is(cell));
__ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
// Store the value.
__ movp(Operand(cell, 0), value);
} else {
@@ -2915,7 +2924,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -2936,7 +2945,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
__ j(not_equal, &skip_assignment);
}
@@ -3036,7 +3045,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3148,7 +3157,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
- DeoptimizeIf(negative, instr, "negative value");
+ DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3187,7 +3196,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -3244,10 +3253,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
+ DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
}
}
@@ -3394,9 +3403,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr, "Smi");
+ DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr, "not a JavaScript object");
+ DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3423,7 +3432,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr, "too many arguments");
+ DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
__ Push(receiver);
__ movp(receiver, length);
@@ -3490,24 +3499,19 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- RDIState rdi_state) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
+ Register function_reg = rdi;
LPointerMap* pointers = instr->pointer_map();
if (can_invoke_directly) {
- if (rdi_state == RDI_UNINITIALIZED) {
- __ Move(rdi, function);
- }
-
// Change context.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
// Set rax to arguments count if adaption is not needed. Assumes that rax
// is available to write to at this point.
@@ -3519,7 +3523,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3530,7 +3534,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
@@ -3642,7 +3646,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
@@ -3688,7 +3692,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, "overflow");
+ DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
__ bind(&is_positive);
}
@@ -3699,7 +3703,7 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, "overflow");
+ DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
__ bind(&is_positive);
}
@@ -3755,18 +3759,18 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Deoptimize if minus zero.
__ movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, "minus zero");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, "NaN");
+ DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3775,7 +3779,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ testq(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
@@ -3785,7 +3789,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3796,7 +3800,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ bind(&done);
}
@@ -3823,7 +3827,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3839,7 +3843,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3854,7 +3858,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
- DeoptimizeIf(negative, instr, "minus zero");
+ DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
}
__ Set(output_reg, 0);
__ bind(&done);
@@ -3933,7 +3937,7 @@ void LCodeGen::DoPower(LPower* instr) {
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3968,10 +3972,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(not_carry, &zero, Label::kNear);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- Operand nan_operand = masm()->ExternalOperand(nan);
- __ movsd(input_reg, nan_operand);
+ __ pcmpeqd(input_reg, input_reg);
__ jmp(&done, Label::kNear);
__ bind(&zero);
ExternalReference ninf =
@@ -4020,9 +4021,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- RDI_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
@@ -4033,8 +4032,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(rdx));
+ DCHECK(vector_register.is(rbx));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ Move(vector_register, vector);
+ __ Move(slot_register, Smi::FromInt(index));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4249,7 +4270,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4310,7 +4331,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds");
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -4397,17 +4418,10 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ movsxlq(ToRegister(key), ToRegister(key));
}
if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value, Label::kNear); // NaN.
-
- __ Set(kScratchRegister,
- bit_cast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(value, kScratchRegister);
-
- __ bind(&have_value);
+ XMMRegister xmm_scratch = double_scratch0();
+ // Turn potential sNaN value into qNaN.
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ subsd(value, xmm_scratch);
}
Operand double_store_operand = BuildFastArrayOperand(
@@ -4516,7 +4530,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4558,7 +4572,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, "memento found");
+ DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4878,12 +4892,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, "overflow");
+ DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
@@ -4893,7 +4907,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi");
+ DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
} else {
__ AssertSmi(input);
}
@@ -4924,7 +4938,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
}
if (deoptimize_on_minus_zero) {
@@ -4934,7 +4948,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
@@ -4943,10 +4957,9 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
+ __ pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
@@ -4990,26 +5003,27 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
+ DeoptimizeIf(not_equal, instr,
+ Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ Set(input_reg, 0);
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, xmm0);
__ Cvtlsi2sd(scratch, input_reg);
__ ucomisd(xmm0, scratch);
- DeoptimizeIf(not_equal, instr, "lost precision");
- DeoptimizeIf(parity_even, instr, "NaN");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
}
}
@@ -5080,11 +5094,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, "lost precision");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, "NaN");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -5107,21 +5121,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, "lost precision");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, "NaN");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
__ Integer32ToSmi(result_reg, result_reg);
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
+ DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
}
@@ -5129,7 +5143,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr, "Smi");
+ DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
}
}
@@ -5149,14 +5163,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, "wrong instance type");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
} else {
- DeoptimizeIf(below, instr, "wrong instance type");
+ DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr, "wrong instance type");
+ DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
} else {
@@ -5168,13 +5182,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
+ Deoptimizer::kWrongInstanceType);
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr, "wrong instance type");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -5183,7 +5198,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
- DeoptimizeIf(not_equal, instr, "value mismatch");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}
@@ -5198,7 +5213,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ testp(rax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, "instance migration failed");
+ DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
}
@@ -5252,7 +5267,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
__ bind(&success);
@@ -5291,7 +5306,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
@@ -5501,7 +5516,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -5770,19 +5785,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(equal, instr, "undefined");
+ DeoptimizeIf(equal, instr, Deoptimizer::kUndefined);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpp(rax, null_value);
- DeoptimizeIf(equal, instr, "null");
+ DeoptimizeIf(equal, instr, Deoptimizer::kNull);
Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr, "Smi");
+ DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr, "wrong instance type");
+ DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -5797,7 +5812,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
__ bind(&use_cache);
}
@@ -5819,7 +5834,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr, "no cache");
+ DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
}
@@ -5827,7 +5842,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index ccd90b53c6..cc7c00b1ca 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -118,7 +118,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -188,27 +188,22 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
- enum RDIState {
- RDI_UNINITIALIZED,
- RDI_CONTAINS_TARGET
- };
-
// Generate a direct call to a known function. Expects the function
// to be in rdi.
void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- RDIState rdi_state);
+ int formal_parameter_count, int arity,
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index c1831af84b..2b67ce96cc 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -268,6 +268,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -748,7 +762,8 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
+ : DefineSameAsFirst(result);
}
}
@@ -1259,7 +1274,14 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(rdx);
+ vector = FixedTemp(rbx);
+ }
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, rax), instr);
}
@@ -2049,8 +2071,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
- LOperand* temp = TempRegister();
- return DefineAsRegister(new(zone()) LConstantD(temp));
+ return DefineAsRegister(new (zone()) LConstantD);
} else if (r.IsExternal()) {
return DefineAsRegister(new(zone()) LConstantE);
} else if (r.IsTagged()) {
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 8b7a5bc3c9..ec54c7d583 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -1319,18 +1319,12 @@ class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- double value() const { return hydrogen()->DoubleValue(); }
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
};
@@ -1934,11 +1928,14 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
@@ -1946,7 +1943,11 @@ class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
int arity() const { return hydrogen()->argument_count() - 1; }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
};
@@ -2211,7 +2212,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2266,7 +2267,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 5b897de545..a1172262b0 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -692,175 +692,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-static int Offset(ExternalReference ref0, ExternalReference ref1) {
- int64_t offset = (ref0.address() - ref1.address());
- // Check that fits into int.
- DCHECK(static_cast<int>(offset) == offset);
- return static_cast<int>(offset);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
- EnterApiExitFrame(arg_stack_space);
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- Operand return_value_operand,
- Operand* context_restore_operand) {
- Label prologue;
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label write_back;
-
- Factory* factory = isolate()->factory();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
-
- DCHECK(rdx.is(function_address) || r8.is(function_address));
- // Allocate HandleScope in callee-save registers.
- Register prev_next_address_reg = r14;
- Register prev_limit_reg = rbx;
- Register base_reg = r15;
- Move(base_reg, next_address);
- movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
- addl(Operand(base_reg, kLevelOffset), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1);
- LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
-
- Label profiler_disabled;
- Label end_profiler_check;
- Move(rax, ExternalReference::is_profiling_address(isolate()));
- cmpb(Operand(rax, 0), Immediate(0));
- j(zero, &profiler_disabled);
-
- // Third parameter is the address of the actual getter function.
- Move(thunk_last_arg, function_address);
- Move(rax, thunk_ref);
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- // Call the api function!
- Move(rax, function_address);
-
- bind(&end_profiler_check);
-
- // Call the api function!
- call(rax);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1);
- LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Load the value from ReturnValue
- movp(rax, return_value_operand);
- bind(&prologue);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- Move(rsi, scheduled_exception_address);
- Cmp(Operand(rsi, 0), factory->the_hole_value());
- j(not_equal, &promote_scheduled_exception);
- bind(&exception_handled);
-
-#if ENABLE_EXTRA_CHECKS
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = rax;
- Register map = rcx;
-
- JumpIfSmi(return_value, &ok, Label::kNear);
- movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- j(below, &ok, Label::kNear);
-
- CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- j(above_equal, &ok, Label::kNear);
-
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kTrueValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kFalseValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kNullValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- Abort(kAPICallReturnedInvalidObject);
-
- bind(&ok);
-#endif
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- movp(rsi, *context_restore_operand);
- }
- LeaveApiExitFrame(!restore_context);
- ret(stack_space * kPointerSize);
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movp(prev_limit_reg, rax);
- LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
- LoadAddress(rax,
- ExternalReference::delete_handle_scope_extensions(isolate()));
- call(rax);
- movp(rax, prev_limit_reg);
- jmp(&leave_exit_frame);
-}
-
-
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
@@ -2367,7 +2198,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
@@ -2747,21 +2578,8 @@ void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
xorps(dst, dst);
} else {
- unsigned cnt = base::bits::CountPopulation32(src);
- unsigned nlz = base::bits::CountLeadingZeros32(src);
- unsigned ntz = base::bits::CountTrailingZeros32(src);
- if (nlz + cnt + ntz == 32) {
- pcmpeqd(dst, dst);
- if (ntz == 0) {
- psrld(dst, 32 - cnt);
- } else {
- pslld(dst, 32 - cnt);
- if (nlz != 0) psrld(dst, nlz);
- }
- } else {
- movl(kScratchRegister, Immediate(src));
- movq(dst, kScratchRegister);
- }
+ movl(kScratchRegister, Immediate(src));
+ movq(dst, kScratchRegister);
}
}
@@ -2772,18 +2590,7 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
if (upper == 0) {
Move(dst, lower);
} else {
- unsigned cnt = base::bits::CountPopulation64(src);
- unsigned nlz = base::bits::CountLeadingZeros64(src);
- unsigned ntz = base::bits::CountTrailingZeros64(src);
- if (nlz + cnt + ntz == 64) {
- pcmpeqd(dst, dst);
- if (ntz == 0) {
- psrlq(dst, 64 - cnt);
- } else {
- psllq(dst, 64 - cnt);
- if (nlz != 0) psrlq(dst, nlz);
- }
- } else if (lower == 0) {
+ if (lower == 0) {
Move(dst, upper);
psllq(dst, 32);
} else {
@@ -2859,10 +2666,15 @@ void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
}
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
movp(value, FieldOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
JumpIfSmi(value, miss);
}
@@ -3061,7 +2873,7 @@ void MacroAssembler::Call(ExternalReference ext) {
void MacroAssembler::Call(const Operand& op) {
- if (kPointerSize == kInt64Size) {
+ if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
call(op);
} else {
movp(kScratchRegister, op);
@@ -3420,7 +3232,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
XMMRegister xmm_scratch,
Label* fail,
int elements_offset) {
- Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+ Label smi_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -3429,44 +3241,20 @@ void MacroAssembler::StoreNumberToDoubleElements(
fail,
DONT_DO_SMI_CHECK);
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmpl(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
- xmm_scratch);
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- // Convert all NaNs to the same canonical NaN value when they are stored in
- // the double array.
- Set(kScratchRegister,
- bit_cast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- movq(xmm_scratch, kScratchRegister);
- jmp(&have_double_value, Label::kNear);
+ // Double value, turn potential sNaN into qNaN.
+ Move(xmm_scratch, 1.0);
+ mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ jmp(&done, Label::kNear);
bind(&smi_value);
// Value is a smi. convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
Cvtlsi2sd(xmm_scratch, kScratchRegister);
+ bind(&done);
movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
- bind(&done);
}
@@ -3630,6 +3418,18 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+ : AccessorPair::kSetterOffset;
+ movp(dst, FieldOperand(dst, offset));
+}
+
+
void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
Register scratch2, Handle<WeakCell> cell,
Handle<Code> success,
@@ -4384,7 +4184,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::kMask));
j(not_zero, miss);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 465666555b..9f25d60ddf 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -846,6 +846,8 @@ class MacroAssembler: public Assembler {
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
@@ -893,6 +895,8 @@ class MacroAssembler: public Assembler {
void Move(XMMRegister dst, uint32_t src);
void Move(XMMRegister dst, uint64_t src);
+ void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
+ void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -1044,6 +1048,8 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register reg) {
@@ -1326,24 +1332,6 @@ class MacroAssembler: public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext, int result_size);
- // Prepares stack to put arguments (aligns and so on). WIN64 calling
- // convention requires to put the pointer to the return value slot into
- // rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves
- // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
- // inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers r14, r15, rbx and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- Operand return_value_operand,
- Operand* context_restore_operand);
-
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
// etc., not pushed. The argument count assumes all arguments are word sized.
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 82a3735d77..1818dbb72c 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -93,12 +93,11 @@ namespace internal {
#define __ ACCESS_MASM((&masm_))
-RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(zone->isolate(), NULL, kRegExpCodeSize),
+RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(isolate, NULL, kRegExpCodeSize),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4, zone),
mode_(mode),
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index e3733775de..70a6709b54 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -17,7 +17,8 @@ namespace internal {
class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerX64(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerX64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 4177156ee6..f2db021b69 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -256,6 +256,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
+ reloc_info_writer.Finish();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -1201,6 +1202,13 @@ void Assembler::ret(int imm16) {
}
+void Assembler::ud2() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x0B);
+}
+
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -1239,7 +1247,10 @@ void Assembler::bind_to(Label* L, int pos) {
while (L->is_linked()) {
Displacement disp = disp_at(L);
int fixup_pos = L->pos();
- if (disp.type() == Displacement::CODE_RELATIVE) {
+ if (disp.type() == Displacement::CODE_ABSOLUTE) {
+ long_at_put(fixup_pos, reinterpret_cast<int>(buffer_ + pos));
+ internal_reference_positions_.push_back(fixup_pos);
+ } else if (disp.type() == Displacement::CODE_RELATIVE) {
// Relative to Code* heap object pointer.
long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
} else {
@@ -1897,28 +1908,6 @@ void Assembler::setcc(Condition cc, Register reg) {
}
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -1959,15 +1948,10 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ int32_t* p = reinterpret_cast<int32_t*>(buffer_ + pos);
+ *p += pc_delta;
}
DCHECK(!buffer_overflow());
@@ -2017,7 +2001,21 @@ void Assembler::emit_operand(Register reg, const Operand& adr) {
if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
RecordRelocInfo(adr.rmode_);
- pc_ += sizeof(int32_t);
+ if (adr.rmode_ == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
+ emit_label(*reinterpret_cast<Label**>(pc_));
+ } else {
+ pc_ += sizeof(int32_t);
+ }
+ }
+}
+
+
+void Assembler::emit_label(Label* label) {
+ if (label->is_bound()) {
+ internal_reference_positions_.push_back(pc_offset());
+ emit(reinterpret_cast<uint32_t>(buffer_ + label->pos()));
+ } else {
+ emit_disp(label, Displacement::CODE_ABSOLUTE);
}
}
@@ -2042,6 +2040,13 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::dd(Label* label) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ emit_label(label);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
DCHECK(!RelocInfo::IsNone(rmode));
// Don't record external references unless the heap will be serialized.
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 1da632f266..05359648ad 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -37,6 +37,9 @@
#ifndef V8_X87_ASSEMBLER_X87_H_
#define V8_X87_ASSEMBLER_X87_H_
+#include <deque>
+
+#include "src/assembler.h"
#include "src/isolate.h"
#include "src/serialize.h"
@@ -348,6 +351,11 @@ class Operand BASE_EMBEDDED {
int32_t disp,
RelocInfo::Mode rmode = RelocInfo::NONE32);
+ static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
+ return Operand(index, scale, reinterpret_cast<int32_t>(table),
+ RelocInfo::INTERNAL_REFERENCE);
+ }
+
static Operand StaticVariable(const ExternalReference& ext) {
return Operand(reinterpret_cast<int32_t>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
@@ -421,11 +429,7 @@ class Operand BASE_EMBEDDED {
class Displacement BASE_EMBEDDED {
public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
+ enum Type { UNCONDITIONAL_JUMP, CODE_RELATIVE, OTHER, CODE_ABSOLUTE };
int data() const { return data_; }
Type type() const { return TypeField::decode(data_); }
@@ -787,6 +791,7 @@ class Assembler : public AssemblerBase {
void int3();
void nop();
void ret(int imm16);
+ void ud2();
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -931,14 +936,18 @@ class Assembler : public AssemblerBase {
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable, or provide "force = true" flag to always
- // write a comment.
- void RecordComment(const char* msg, bool force = false);
+ // Use --code-comments to enable.
+ void RecordComment(const char* msg);
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(const int reason, const int raw_position);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
+ void dd(Label* label);
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -1009,6 +1018,8 @@ class Assembler : public AssemblerBase {
void emit_operand(Register reg, const Operand& adr);
+ void emit_label(Label* label);
+
void emit_farith(int b1, int b2, int i);
// labels
@@ -1027,6 +1038,11 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class EnsureSpace;
+ // Internal reference positions, required for (potential) patching in
+ // GrowBuffer(); contains only those internal references whose labels
+ // are already bound.
+ std::deque<int> internal_reference_positions_;
+
// code generation
RelocInfoWriter reloc_info_writer;
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 51bb3a7c65..9fda5a7188 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -100,6 +100,42 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
+static void Generate_Runtime_NewObject(MacroAssembler* masm,
+ bool create_memento,
+ Register original_constructor,
+ Label* count_incremented,
+ Label* allocated) {
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 2));
+ __ push(edi);
+ offset = kPointerSize;
+ }
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi);
+ __ push(original_constructor);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(ebx, eax); // store result in ebx
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ if (create_memento) {
+ __ jmp(count_incremented);
+ } else {
+ __ jmp(allocated);
+ }
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
@@ -107,6 +143,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- eax: number of arguments
// -- edi: constructor function
// -- ebx: allocation site or undefined
+ // -- edx: original constructor
// -----------------------------------
// Should never create mementos for api functions.
@@ -128,9 +165,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push the function to invoke on the stack.
__ push(edi);
+ __ cmp(edx, edi);
+ Label normal_new;
+ Label count_incremented;
+ Label allocated;
+ __ j(equal, &normal_new);
+
+ // Original constructor and function are different.
+ Generate_Runtime_NewObject(masm, create_memento, edx, &count_incremented,
+ &allocated);
+ __ bind(&normal_new);
+
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
+ Label rt_call;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
@@ -344,34 +392,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
__ bind(&rt_call);
- int offset = 0;
- if (create_memento) {
- // Get the cell or allocation site.
- __ mov(edi, Operand(esp, kPointerSize * 2));
- __ push(edi);
- offset = kPointerSize;
- }
-
- // Must restore esi (context) and edi (constructor) before calling runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- // edi: function (constructor)
- __ push(edi);
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
- } else {
- __ CallRuntime(Runtime::kNewObject, 1);
- }
- __ mov(ebx, eax); // store result in ebx
-
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
+ Generate_Runtime_NewObject(masm, create_memento, edi, &count_incremented,
+ &allocated);
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
@@ -478,6 +500,80 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments
+ // -- edi: constructor function
+ // -- ebx: allocation site or undefined
+ // -- edx: original constructor
+ // -----------------------------------
+
+ // TODO(dslomov): support pretenuring
+ CHECK(!FLAG_pretenuring_call_new);
+
+ {
+ FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve actual arguments count.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ SmiUntag(eax);
+
+ // Push new.target.
+ __ push(edx);
+
+ // receiver is the hole.
+ __ push(Immediate(masm->isolate()->factory()->the_hole_value()));
+
+ // Set up pointer to last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, eax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ __ inc(eax); // Pushed new.target.
+
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+ __ j(equal, &skip_step_in);
+
+ __ push(eax);
+ __ push(edi);
+ __ push(edi);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ pop(edi);
+ __ pop(eax);
+
+ __ bind(&skip_step_in);
+
+ // Invoke function.
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
+
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ mov(ebx, Operand(esp, 0));
+ }
+
+ __ pop(ecx); // Return address.
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));
+ __ push(ecx);
+ __ ret(0);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -1096,6 +1192,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Get the Array function.
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
+ __ mov(edx, edi);
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 1264dd9b2d..58200bca82 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -419,6 +419,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The key is in edx and the parameter count is in eax.
DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
@@ -485,6 +486,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// esp[8] : receiver displacement
// esp[12] : function
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -513,6 +516,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ebx = parameter count (tagged)
__ mov(ebx, Operand(esp, 1 * kPointerSize));
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
// TODO(rossberg): Factor out some of the bits that are shared with the other
// Generate* functions.
@@ -752,9 +757,15 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
+
+ if (has_new_target()) {
+ // Subtract 1 from smi-tagged arguments count.
+ __ sub(ecx, Immediate(2));
+ }
+
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
__ mov(Operand(esp, 2 * kPointerSize), edx);
// Try the new space allocation. Start out with computing the size of
@@ -829,6 +840,31 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : index of rest parameter
+ // esp[8] : number of parameters
+ // esp[12] : receiver displacement
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 3 * kPointerSize), edx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1381,7 +1417,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, eax);
__ test(ecx, edx);
@@ -1836,6 +1872,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(ebx);
}
+ if (IsSuperConstructorCall()) {
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 2 * kPointerSize));
+ } else {
+ // Pass original constructor to construct stub.
+ __ mov(edx, edi);
+ }
+
// Jump to the function-specific construct stub.
Register jmp_reg = ecx;
__ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -1876,12 +1919,11 @@ static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
// edi - function
// edx - slot id
+ // ebx - vector
Label miss;
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, ebx);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &miss);
@@ -1897,6 +1939,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ mov(ebx, ecx);
+ __ mov(edx, edi);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -1917,6 +1960,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// edi - function
// edx - slot id
+ // ebx - vector
Isolate* isolate = masm->isolate();
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
@@ -1928,13 +1972,31 @@ void CallICStub::Generate(MacroAssembler* masm) {
int argc = arg_count();
ParameterCount actual(argc);
- EmitLoadTypeFeedbackVector(masm, ebx);
-
// The checks. First, does edi match the recorded monomorphic target?
- __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size,
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
__ j(not_equal, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(edi, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@@ -1963,8 +2025,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&extra_checks_or_miss);
Label uninitialized, miss;
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &slow_start);
@@ -2008,15 +2068,18 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Update stats.
__ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
- // Store the function.
- __ mov(
- FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
- edi);
+ // Store the function. Use a stub since we need a frame for allocation.
+ // ebx - vector
+ // edx - slot
+ // edi - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(isolate);
+ __ push(edi);
+ __ CallStub(&create_stub);
+ __ pop(edi);
+ }
- // Update the write barrier.
- __ mov(eax, edi);
- __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
@@ -2041,29 +2104,22 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(ecx, Operand(esp, (arg_count() + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
- __ push(ecx);
- __ push(edi);
- __ push(ebx);
- __ push(edx);
+ // Push the receiver and the function and feedback info.
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
- // Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
- ExternalReference miss = ExternalReference(IC_Utility(id),
- masm->isolate());
- __ CallExternalReference(miss, 4);
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 3);
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- }
+ // Move result to edi and exit the internal frame.
+ __ mov(edi, eax);
}
@@ -2079,6 +2135,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -3437,15 +3494,17 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
Label miss;
+ Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
+ __ GetWeakValue(edi, cell);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, known_map_);
+ __ cmp(ecx, edi);
__ j(not_equal, &miss, Label::kNear);
- __ cmp(ebx, known_map_);
+ __ cmp(ebx, edi);
__ j(not_equal, &miss, Label::kNear);
__ sub(eax, edx);
@@ -3994,6 +4053,20 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, ebx);
+ CallICStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, ebx);
+ CallIC_ArrayStub stub(isolate(), state());
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4215,9 +4288,10 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : argc (only if argument_count() == ANY)
+ // -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- ebx : AllocationSite or undefined
// -- edi : constructor
+ // -- edx : Original constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
@@ -4237,6 +4311,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(ebx);
}
+ Label subclassing;
+
+ __ cmp(edx, edi);
+ __ j(not_equal, &subclassing);
+
Label no_info;
// If the feedback vector is the undefined value call an array constructor
// that doesn't use AllocationSites.
@@ -4252,6 +4331,30 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ // Subclassing.
+ __ bind(&subclassing);
+ __ pop(ecx); // return address.
+ __ push(edi);
+ __ push(edx);
+
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ add(eax, Immediate(2));
+ break;
+ case NONE:
+ __ mov(eax, Immediate(2));
+ break;
+ case ONE:
+ __ mov(eax, Immediate(3));
+ break;
+ }
+
+ __ push(ecx);
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
@@ -4341,13 +4444,204 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+// Generates an Operand for saving parameters after PrepareCallApiFunction.
+static Operand ApiParameterOperand(int index) {
+ return Operand(esp, index * kPointerSize);
+}
+
+
+// Prepares stack to put arguments (aligns and so on). Reserves
+// space for return value if needed (assumes the return value is a handle).
+// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
+// etc. Saves context (esi). If space was reserved for return value then
+// stores the pointer to the reserved slot into esi.
+static void PrepareCallApiFunction(MacroAssembler* masm, int argc) {
+ __ EnterApiExitFrame(argc);
+ if (__ emit_debug_code()) {
+ __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
+ }
+}
+
+
+// Calls an API function. Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions. Clobbers ebx, edi and
+// caller-save registers. Restores context. On return removes
+// stack_space * kPointerSize (GCed).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+ Register function_address,
+ ExternalReference thunk_ref,
+ Operand thunk_last_arg, int stack_space,
+ Operand* stack_space_operand,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
+ Isolate* isolate = masm->isolate();
+
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address(isolate);
+ ExternalReference level_address =
+ ExternalReference::handle_scope_level_address(isolate);
+
+ DCHECK(edx.is(function_address));
+ // Allocate HandleScope in callee-save registers.
+ __ mov(ebx, Operand::StaticVariable(next_address));
+ __ mov(edi, Operand::StaticVariable(limit_address));
+ __ add(Operand::StaticVariable(level_address), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, eax);
+ __ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
+ __ cmpb(Operand(eax, 0), 0);
+ __ j(zero, &profiler_disabled);
+
+ // Additional parameter is the address of the actual getter function.
+ __ mov(thunk_last_arg, function_address);
+ // Call the api function.
+ __ mov(eax, Immediate(thunk_ref));
+ __ call(eax);
+ __ jmp(&end_profiler_check);
+
+ __ bind(&profiler_disabled);
+ // Call the api function.
+ __ call(function_address);
+ __ bind(&end_profiler_check);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ PushSafepointRegisters();
+ __ PrepareCallCFunction(1, eax);
+ __ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate)));
+ __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+ 1);
+ __ PopSafepointRegisters();
+ }
+
+ Label prologue;
+ // Load the value from ReturnValue
+ __ mov(eax, return_value_operand);
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ __ bind(&prologue);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ mov(Operand::StaticVariable(next_address), ebx);
+ __ sub(Operand::StaticVariable(level_address), Immediate(1));
+ __ Assert(above_equal, kInvalidHandleScopeLevel);
+ __ cmp(edi, Operand::StaticVariable(limit_address));
+ __ j(not_equal, &delete_allocated_handles);
+ __ bind(&leave_exit_frame);
+
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address(isolate);
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(isolate->factory()->the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception);
+ __ bind(&exception_handled);
+
+#if DEBUG
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = eax;
+ Register map = ecx;
+
+ __ JumpIfSmi(return_value, &ok, Label::kNear);
+ __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ __ CmpInstanceType(map, LAST_NAME_TYPE);
+ __ j(below_equal, &ok, Label::kNear);
+
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &ok, Label::kNear);
+
+ __ cmp(map, isolate->factory()->heap_number_map());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->undefined_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->true_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->false_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ cmp(return_value, isolate->factory()->null_value());
+ __ j(equal, &ok, Label::kNear);
+
+ __ Abort(kAPICallReturnedInvalidObject);
+
+ __ bind(&ok);
+#endif
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ mov(esi, *context_restore_operand);
+ }
+ if (stack_space_operand != nullptr) {
+ __ mov(ebx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame(!restore_context);
+ if (stack_space_operand != nullptr) {
+ DCHECK_EQ(0, stack_space);
+ __ pop(ecx);
+ __ add(esp, ebx);
+ __ jmp(ecx);
+ } else {
+ __ ret(stack_space * kPointerSize);
+ }
+
+ __ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ __ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ ExternalReference delete_extensions =
+ ExternalReference::delete_handle_scope_extensions(isolate);
+ __ bind(&delete_allocated_handles);
+ __ mov(Operand::StaticVariable(limit_address), edi);
+ __ mov(edi, eax);
+ __ mov(Operand(esp, 0),
+ Immediate(ExternalReference::isolate_address(isolate)));
+ __ mov(eax, Immediate(delete_extensions));
+ __ call(eax);
+ __ mov(eax, edi);
+ __ jmp(&leave_exit_frame);
+}
+
+
+static void CallApiFunctionStubHelper(MacroAssembler* masm,
+ const ParameterCount& argc,
+ bool return_first_arg,
+ bool call_data_undefined) {
// ----------- S t a t e -------------
- // -- eax : callee
+ // -- edi : callee
// -- ebx : call_data
// -- ecx : holder
// -- edx : api_function_address
// -- esi : context
+ // -- eax : number of arguments if argc is a register
// --
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -4356,16 +4650,12 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Register callee = eax;
+ Register callee = edi;
Register call_data = ebx;
Register holder = ecx;
Register api_function_address = edx;
- Register return_address = edi;
Register context = esi;
-
- int argc = this->argc();
- bool is_store = this->is_store();
- bool call_data_undefined = this->call_data_undefined();
+ Register return_address = eax;
typedef FunctionCallbackArguments FCA;
@@ -4378,12 +4668,17 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- __ pop(return_address);
+ DCHECK(argc.is_immediate() || eax.is(argc.reg()));
- // context save
- __ push(context);
- // load context from callee
- __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+ if (argc.is_immediate()) {
+ __ pop(return_address);
+ // context save.
+ __ push(context);
+ } else {
+ // pop return address and save context
+ __ xchg(context, Operand(esp, 0));
+ return_address = context;
+ }
// callee
__ push(callee);
@@ -4394,9 +4689,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register scratch = call_data;
if (!call_data_undefined) {
// return value
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
// return value default
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
} else {
// return value
__ push(scratch);
@@ -4404,15 +4699,18 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ push(scratch);
}
// isolate
- __ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
// holder
__ push(holder);
__ mov(scratch, esp);
- // return address
+ // push return address
__ push(return_address);
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+
// API function gets reference to the v8::Arguments. If CPU profiler
// is enabled wrapper function will be called and we need to pass
// address of the callback as additional parameter, always allocate
@@ -4423,41 +4721,76 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
+ PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
// FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), scratch);
- __ add(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ Move(ApiParameterOperand(4), Immediate(argc));
- // FunctionCallbackInfo::is_construct_call_.
- __ Move(ApiParameterOperand(5), Immediate(0));
+ if (argc.is_immediate()) {
+ __ add(scratch,
+ Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Move(ApiParameterOperand(5), Immediate(0));
+ } else {
+ __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
+ (FCA::kArgsLength - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
+ __ mov(ApiParameterOperand(3), scratch);
+ // FunctionCallbackInfo::length_.
+ __ mov(ApiParameterOperand(4), argc.reg());
+ // FunctionCallbackInfo::is_construct_call_.
+ __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
+ (FCA::kArgsLength + 1) * kPointerSize));
+ __ mov(ApiParameterOperand(5), argc.reg());
+ }
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
__ mov(ApiParameterOperand(0), scratch);
ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(isolate());
+ ExternalReference::invoke_function_callback(masm->isolate());
Operand context_restore_operand(ebp,
(2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
- if (is_store) {
+ if (return_first_arg) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
Operand return_value_operand(ebp, return_value_offset * kPointerSize);
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- ApiParameterOperand(1),
- argc + FCA::kArgsLength + 1,
- return_value_operand,
- &context_restore_operand);
+ int stack_space = 0;
+ Operand is_construct_call_operand = ApiParameterOperand(5);
+ Operand* stack_space_operand = &is_construct_call_operand;
+ if (argc.is_immediate()) {
+ stack_space = argc.immediate() + FCA::kArgsLength + 1;
+ stack_space_operand = nullptr;
+ }
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ ApiParameterOperand(1), stack_space,
+ stack_space_operand, return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
+ call_data_undefined);
+}
+
+
+void CallApiAccessorStub::Generate(MacroAssembler* masm) {
+ bool is_store = this->is_store();
+ int argc = this->argc();
+ bool call_data_undefined = this->call_data_undefined();
+ CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
+ call_data_undefined);
}
@@ -4484,7 +4817,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// load address of name
__ lea(scratch, Operand(esp, 1 * kPointerSize));
- __ PrepareCallApiFunction(kApiArgc);
+ PrepareCallApiFunction(masm, kApiArgc);
__ mov(ApiParameterOperand(0), scratch); // name.
__ add(scratch, Immediate(kPointerSize));
__ mov(ApiParameterOperand(1), scratch); // arguments pointer.
@@ -4492,12 +4825,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- ApiParameterOperand(2),
- kStackSpace,
- Operand(ebp, 7 * kPointerSize),
- NULL);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ ApiParameterOperand(2), kStackSpace, nullptr,
+ Operand(ebp, 7 * kPointerSize), NULL);
}
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 0e53a0e8db..ffd2fa84ef 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -212,7 +212,8 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
- int parameter_count = function->shared()->formal_parameter_count() + 1;
+ int parameter_count =
+ function->shared()->internal_formal_parameter_count() + 1;
unsigned input_frame_size = input_->GetFrameSize();
unsigned alignment_state_offset =
input_frame_size - parameter_count * kPointerSize -
@@ -241,6 +242,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pushad();
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ __ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+
// GP registers are safe to use now.
// Save used x87 fp registers in correct position of previous reserve space.
Label loop, done;
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
index 8c77d7718d..7e0a07503f 100644
--- a/deps/v8/src/x87/disasm-x87.cc
+++ b/deps/v8/src/x87/disasm-x87.cc
@@ -893,6 +893,8 @@ int DisassemblerX87::RegisterFPUInstruction(int escape_opcode,
// Returns NULL if the instruction is not handled here.
static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
+ case 0x0B:
+ return "ud2";
case 0x18: return "prefetch";
case 0xA2: return "cpuid";
case 0xBE: return "movsx_b";
@@ -1057,7 +1059,7 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
data[7] == 0) {
AppendToBuffer("nop"); // 8 byte nop.
data += 8;
- } else if (f0byte == 0xA2 || f0byte == 0x31) {
+ } else if (f0byte == 0x0B || f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
} else if (f0byte == 0x28) {
diff --git a/deps/v8/src/x87/full-codegen-x87.cc b/deps/v8/src/x87/full-codegen-x87.cc
index 0f292cc49b..4ec21ae09c 100644
--- a/deps/v8/src/x87/full-codegen-x87.cc
+++ b/deps/v8/src/x87/full-codegen-x87.cc
@@ -114,7 +114,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ if (is_sloppy(info->language_mode()) && !info->is_native()) {
Label ok;
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -143,7 +143,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!info->function()->is_generator() || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -190,7 +190,7 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in edi.
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(edi);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -233,6 +233,26 @@ void FullCodeGenerator::Generate() {
}
}
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ lea(edx,
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(num_parameters)));
+ __ push(Immediate(Smi::FromInt(rest_index)));
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, eax, ebx, edx);
+ }
+
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -254,14 +274,18 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, eax, ebx, edx);
@@ -410,7 +434,11 @@ void FullCodeGenerator::EmitReturnSequence() {
int no_frame_start = masm_->pc_offset();
__ pop(ebp);
- int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
@@ -858,15 +886,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
+ ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
- DCHECK(variable->interface()->IsFrozen());
+ DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(eax, scope_->ContextChainLength(scope_->ScriptScope()));
- __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
+ __ mov(eax, ContextOperand(eax, descriptor->Index()));
__ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
// Assign it.
@@ -1173,6 +1202,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
// Generate code for the body of the loop.
@@ -1210,7 +1240,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
@@ -1449,6 +1479,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
+ } else if (var->is_this()) {
+ CHECK(info_->function() != nullptr &&
+ (info_->function()->kind() & kSubclassConstructor) != 0);
+ // TODO(dslomov): implement 'this' hole check elimination.
+ skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
@@ -1606,11 +1641,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
__ push(eax); // Save result on the stack
@@ -1651,7 +1688,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
if (property->emit_store()) {
EmitSetHomeObjectIfNeeded(value, 2);
- __ push(Immediate(Smi::FromInt(SLOPPY))); // Strict mode
+ __ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
@@ -1660,17 +1697,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::PROTOTYPE:
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
- } else {
- __ Drop(2);
- }
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
break;
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = value;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = value;
+ }
break;
}
}
@@ -1690,6 +1728,65 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ VisitForStackValue(value);
+ EmitSetHomeObjectIfNeeded(value, 2);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ break;
+ }
+ }
+ }
+
if (expr->has_function()) {
DCHECK(result_saved);
__ push(Operand(esp, 0));
@@ -1745,6 +1842,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
bool result_saved = false; // Is the result saved to the stack?
@@ -1883,18 +1981,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
- mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
+ EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
@@ -2275,7 +2369,6 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
- OverwriteMode mode,
Expression* left,
Expression* right) {
// Do combined smi check of the operands. Left operand is on the
@@ -2289,7 +2382,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2381,37 +2474,35 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
- Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- DCHECK(key != NULL);
if (property->is_static()) {
__ push(Operand(esp, kPointerSize)); // constructor
} else {
__ push(Operand(esp, 0)); // prototype
}
- VisitForStackValue(key);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
- __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ __ push(Immediate(Smi::FromInt(DONT_ENUM)));
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
break;
case ObjectLiteral::Property::SETTER:
- __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ __ push(Immediate(Smi::FromInt(DONT_ENUM)));
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
break;
-
- default:
- UNREACHABLE();
}
}
@@ -2423,11 +2514,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(edx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2502,7 +2591,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
break;
}
@@ -2568,7 +2657,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
@@ -2583,7 +2672,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+ } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
@@ -2618,8 +2707,8 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ push(Immediate(key->value()));
__ push(eax);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
+ __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
4);
}
@@ -2630,9 +2719,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
__ push(eax);
- __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime(
+ (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2647,7 +2737,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2670,8 +2761,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
__ push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(eax);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
@@ -2686,8 +2775,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(eax);
}
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(eax);
}
@@ -2834,9 +2924,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2864,7 +2953,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the language mode.
- __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
// Push the start position of the scope the calls resides in.
__ push(Immediate(Smi::FromInt(scope()->start_position())));
@@ -2874,8 +2963,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kGetPrototype, 1);
}
@@ -2931,7 +3019,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::GLOBAL_CALL) {
EmitCallWithLoadIC(expr);
-
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
VariableProxy* proxy = callee->AsVariableProxy();
@@ -2990,11 +3077,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
}
} else if (call_type == Call::SUPER_CALL) {
- SuperReference* super_ref = callee->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ push(result_register());
- VisitForStackValue(super_ref->this_var());
- EmitCall(expr, CallICState::METHOD);
+ EmitSuperConstructorCall(expr);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -3022,12 +3105,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- if (expr->expression()->IsSuperReference()) {
- EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
- __ push(result_register());
- } else {
- VisitForStackValue(expr->expression());
- }
+ DCHECK(!expr->expression()->IsSuperReference());
+ VisitForStackValue(expr->expression());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3061,6 +3140,66 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ if (!ValidateSuperCall(expr)) return;
+
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(eax, new_target_var);
+ __ push(eax);
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into edi and eax.
+ __ Move(eax, Immediate(arg_count));
+ __ mov(edi, Operand(esp, arg_count * kPointerSize));
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ UNREACHABLE();
+ /* TODO(dslomov): support pretenuring.
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ */
+ }
+
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ RecordJSReturnSite(expr);
+
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(ecx, this_var);
+ __ cmp(ecx, isolate()->factory()->the_hole_value());
+ Label uninitialized_this;
+ __ j(equal, &uninitialized_this);
+ __ push(Immediate(this_var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
+ EmitVariableAssignment(this_var, Token::INIT_CONST);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3595,7 +3734,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -3931,6 +4070,56 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(eax, new_target_var);
+ __ push(eax);
+
+ EmitLoadSuperConstructor();
+ __ push(result_register());
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame);
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ mov(eax, Immediate(0));
+ __ jmp(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(ecx);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ sub(ecx, Immediate(1));
+ __ mov(eax, ecx);
+ __ lea(edx, Operand(edx, ecx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ Label loop;
+ __ bind(&loop);
+ __ push(Operand(edx, -1 * kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+ }
+
+ __ bind(&args_set_up);
+
+ __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+ __ mov(ebx, Immediate(isolate()->factory()->undefined_value()));
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpConstructResultStub stub(isolate());
@@ -3950,7 +4139,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
- DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ DCHECK_NOT_NULL(args->at(0)->AsLiteral());
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
Handle<FixedArray> jsfunction_result_caches(
@@ -4379,14 +4568,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ push(Immediate(Smi::FromInt(strict_mode())));
+ __ push(Immediate(Smi::FromInt(language_mode())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- DCHECK(strict_mode() == SLOPPY || var->is_this());
+ DCHECK(is_sloppy(language_mode()) || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
@@ -4607,6 +4796,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4641,8 +4831,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
- NO_OVERWRITE).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4712,7 +4902,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::NameRegister());
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 26ce4dcb85..c9ef8d1183 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -101,7 +101,19 @@ void FastCloneShallowObjectDescriptor::Initialize(
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {esi, ebx, edx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ebx, edx, edi};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
@@ -128,6 +140,16 @@ void CallFunctionWithFeedbackDescriptor::Initialize(
}
+void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edi, edx, ebx};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
@@ -296,7 +318,28 @@ void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {
esi, // context
- eax, // callee
+ edi, // callee
+ ebx, // call_data
+ ecx, // holder
+ edx, // api_function_address
+ eax, // actual number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Integer32(), // actual number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ esi, // context
+ edi, // callee
ebx, // call_data
ecx, // holder
edx, // api_function_address
diff --git a/deps/v8/src/x87/lithium-codegen-x87.cc b/deps/v8/src/x87/lithium-codegen-x87.cc
index c292bb94b3..07be757ed5 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/x87/lithium-codegen-x87.cc
@@ -76,7 +76,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -110,8 +109,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() &&
- info_->strict_mode() == SLOPPY &&
+ if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
// +1 for return address.
@@ -383,7 +381,7 @@ bool LCodeGen::GenerateJumpTable() {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
- DeoptComment(table_entry->reason);
+ DeoptComment(table_entry->deopt_info);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
@@ -1082,7 +1080,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -1145,18 +1143,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
- DeoptComment(reason);
+ DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.is_empty() ||
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
@@ -1170,11 +1169,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, detail, bailout_type);
+ DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}
@@ -1196,6 +1195,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
+ data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1413,7 +1413,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@@ -1430,7 +1430,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1445,7 +1445,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, "minus zero");
+ DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1467,7 +1467,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1478,7 +1478,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, "minus zero");
+ DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@@ -1497,7 +1497,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1517,19 +1517,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, "lost precision");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1550,7 +1550,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1558,7 +1558,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1568,7 +1568,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1588,7 +1588,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1597,7 +1597,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1607,7 +1607,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1618,7 +1618,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, "lost precision");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1640,13 +1640,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
return;
}
@@ -1673,7 +1673,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, "division by zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1681,7 +1681,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, "minus zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1728,7 +1728,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, "division by zero");
+ DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1737,7 +1737,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1747,7 +1747,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, "overflow");
+ DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1825,7 +1825,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1835,15 +1835,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ j(not_zero, &done);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, "minus zero");
+ DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, "minus zero");
+ DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -1916,7 +1916,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, "negative value");
+ DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
@@ -1933,7 +1933,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, "negative value");
+ DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1948,7 +1948,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, "negative value");
+ DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
@@ -1959,7 +1959,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
__ shl(ToRegister(left), shift_count);
}
@@ -1985,7 +1985,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
@@ -2001,10 +2001,9 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
- double v = instr->value();
- uint64_t int_val = bit_cast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ uint64_t const bits = instr->bits();
+ uint32_t const lower = static_cast<uint32_t>(bits);
+ uint32_t const upper = static_cast<uint32_t>(bits >> 32);
DCHECK(instr->result()->IsDoubleRegister());
__ push(Immediate(upper));
@@ -2045,9 +2044,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr, "not a date object");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -2177,7 +2176,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
}
@@ -2319,8 +2318,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2420,7 +2418,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
@@ -2477,7 +2475,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, "unexpected object");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -2613,7 +2611,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ add(esp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
- __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
+ // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
+ // so we check the upper with 0xffffffff for hole as a temporary fix.
+ __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
EmitBranch(instr, equal);
}
@@ -3117,7 +3117,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
}
@@ -3166,7 +3166,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
// Store the value.
@@ -3183,7 +3183,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -3204,7 +3204,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@@ -3298,7 +3298,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3389,7 +3389,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr, "negative value");
+ DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3419,7 +3419,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -3445,10 +3445,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, "not a Smi");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, "hole");
+ DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
}
}
@@ -3595,9 +3595,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, "Smi");
+ DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr, "not a JavaScript object");
+ DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3623,7 +3623,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, "too many arguments");
+ DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
__ push(receiver);
__ mov(receiver, length);
@@ -3693,22 +3693,18 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- EDIState edi_state) {
+ int formal_parameter_count, int arity,
+ LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
- if (can_invoke_directly) {
- if (edi_state == EDI_UNINITIALIZED) {
- __ LoadHeapObject(edi, function);
- }
+ Register function_reg = edi;
+ if (can_invoke_directly) {
// Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
// Set eax to arguments count if adaption is not needed. Assumes that eax
// is available to write to at this point.
@@ -3720,7 +3716,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
}
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
@@ -3730,7 +3726,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
@@ -3852,7 +3848,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
@@ -3899,7 +3895,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, "overflow");
+ DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
__ bind(&is_positive);
}
@@ -3952,7 +3948,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ fldz();
__ fld(1);
__ FCmp();
- DeoptimizeIf(parity_even, instr, "NaN");
+ DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
__ j(below, &not_minus_zero, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3961,7 +3957,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// +- 0.0.
__ fld(0);
__ FXamSign();
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kFar);
}
@@ -3975,7 +3971,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ fist_s(Operand(esp, 0));
__ pop(output_reg);
__ X87CheckIA();
- DeoptimizeIf(equal, instr, "overflow");
+ DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
__ fnclex();
__ X87SetRC(0x0000);
__ bind(&done);
@@ -4009,7 +4005,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check overflow.
__ X87CheckIA();
__ pop(result);
- DeoptimizeIf(equal, instr, "conversion overflow");
+ DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
@@ -4026,7 +4022,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// If the sign is positive, we return +0.
__ fld(0);
__ FXamSign();
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ Move(result, Immediate(0));
__ jmp(&done);
@@ -4043,7 +4039,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check overflow.
__ X87CheckIA();
__ pop(result);
- DeoptimizeIf(equal, instr, "conversion overflow");
+ DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
@@ -4175,7 +4171,7 @@ void LCodeGen::DoPower(LPower* instr) {
X87LoadForUsage(base);
__ JumpIfSmi(exponent, &no_deopt);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
// Heap number(double)
__ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -4246,10 +4242,11 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ jmp(&done, Label::kNear);
__ bind(&nan_result);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
X87PrepareToWrite(input_reg);
- __ fld_d(Operand::StaticVariable(nan));
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0x7fffffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
X87CommitWrite(input_reg);
__ jmp(&done, Label::kNear);
@@ -4355,9 +4352,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- EDI_CONTAINS_TARGET);
+ instr->arity(), instr);
}
}
@@ -4368,8 +4363,30 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ Register slot_register = ToRegister(instr->temp_slot());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(slot_register.is(edx));
+ DCHECK(vector_register.is(ebx));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+
+ __ mov(vector_register, vector);
+ __ mov(slot_register, Immediate(Smi::FromInt(index)));
+
+ CallICState::CallType call_type =
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
+
+ Handle<Code> ic =
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ CallFunctionStub stub(isolate(), arity, flags);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4553,7 +4570,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4578,7 +4595,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds");
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -4602,7 +4619,32 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
- X87Mov(operand, ToX87Register(instr->value()));
+ uint64_t int_val = kHoleNanInt64;
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ Operand operand2 = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(), elements_kind,
+ instr->base_offset() + kPointerSize);
+
+ Label no_special_nan_handling, done;
+ X87Register value = ToX87Register(instr->value());
+ X87Fxch(value);
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ int offset = sizeof(kHoleNanUpper32);
+ // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
+ // so we check the upper with 0xffffffff for hole as a temporary fix.
+ __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+ __ j(not_equal, &no_special_nan_handling, Label::kNear);
+ __ mov(operand, Immediate(lower));
+ __ mov(operand2, Immediate(upper));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&no_special_nan_handling);
+ __ fst_d(operand);
+ __ bind(&done);
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
@@ -4646,8 +4688,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
@@ -4655,25 +4695,21 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset());
- // Can't use SSE2 in the serializer
+ uint64_t int_val = kHoleNanInt64;
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ Operand double_store_operand2 = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS,
+ instr->base_offset() + kPointerSize);
+
if (instr->hydrogen()->IsConstantHoleStore()) {
// This means we should store the (double) hole. No floating point
// registers required.
- double nan_double = FixedDoubleArray::hole_nan_as_double();
- uint64_t int_val = bit_cast<uint64_t, double>(nan_double);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-
__ mov(double_store_operand, Immediate(lower));
- Operand double_store_operand2 = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset() + kPointerSize);
__ mov(double_store_operand2, Immediate(upper));
} else {
- Label no_special_nan_handling;
+ Label no_special_nan_handling, done;
X87Register value = ToX87Register(instr->value());
X87Fxch(value);
@@ -4681,23 +4717,27 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ fld(0);
__ fld(0);
__ FCmp();
-
__ j(parity_odd, &no_special_nan_handling, Label::kNear);
- __ sub(esp, Immediate(kDoubleSize));
+ // All NaNs are Canonicalized to 0x7fffffffffffffff
+ __ mov(double_store_operand, Immediate(0xffffffff));
+ __ mov(double_store_operand2, Immediate(0x7fffffff));
+ __ jmp(&done, Label::kNear);
+ } else {
+ __ lea(esp, Operand(esp, -kDoubleSize));
__ fst_d(MemOperand(esp, 0));
- __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- __ add(esp, Immediate(kDoubleSize));
- Label canonicalize;
- __ j(not_equal, &canonicalize, Label::kNear);
- __ jmp(&no_special_nan_handling, Label::kNear);
- __ bind(&canonicalize);
- __ fstp(0);
- __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ int offset = sizeof(kHoleNanUpper32);
+ // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
+ // so we check the upper with 0xffffffff for hole as a temporary fix.
+ __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+ __ j(not_equal, &no_special_nan_handling, Label::kNear);
+ __ mov(double_store_operand, Immediate(lower));
+ __ mov(double_store_operand2, Immediate(upper));
+ __ jmp(&done, Label::kNear);
}
-
__ bind(&no_special_nan_handling);
__ fst_d(double_store_operand);
+ __ bind(&done);
}
}
@@ -4761,7 +4801,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4771,7 +4811,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, "memento found");
+ DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4796,7 +4836,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
Immediate(to_map));
// Write barrier.
- DCHECK_NE(instr->temp(), NULL);
+ DCHECK_NOT_NULL(instr->temp());
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
} else {
@@ -5143,12 +5183,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, "overflow");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
@@ -5159,7 +5199,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, "not a Smi");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
} else {
__ AssertSmi(result);
}
@@ -5185,19 +5225,20 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
} else {
Label heap_number, convert;
__ j(equal, &heap_number);
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ fld_d(Operand::StaticVariable(nan));
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0x7fffffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
__ jmp(&done, Label::kNear);
__ bind(&heap_number);
@@ -5217,7 +5258,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
// Pop FPU stack before deoptimizing.
__ fstp(0);
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
} else {
@@ -5270,14 +5311,15 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
+ DeoptimizeIf(not_equal, instr,
+ Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ Move(input_reg, Immediate(0));
} else {
// TODO(olivf) Converting a number on the fpu is actually quite slow. We
// should first try a fast conversion and then bailout to this slow case.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, "not a heap number");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
__ sub(esp, Immediate(kPointerSize));
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
@@ -5293,12 +5335,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ j(equal, &no_precision_lost, Label::kNear);
__ fstp(0);
- DeoptimizeIf(no_condition, instr, "lost precision");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&no_precision_lost);
__ j(parity_odd, &not_nan);
__ fstp(0);
- DeoptimizeIf(no_condition, instr, "NaN");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&not_nan);
__ test(input_reg, Operand(input_reg));
@@ -5313,14 +5355,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ fstp_s(Operand(esp, 0));
__ pop(input_reg);
__ test(input_reg, Operand(input_reg));
- DeoptimizeIf(not_zero, instr, "minus zero");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
} else {
__ fist_s(MemOperand(esp, 0));
__ fild_s(MemOperand(esp, 0));
__ FCmp();
__ pop(input_reg);
- DeoptimizeIf(not_equal, instr, "lost precision");
- DeoptimizeIf(parity_even, instr, "NaN");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
}
}
}
@@ -5401,11 +5443,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, "lost precision");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, "NaN");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -5425,21 +5467,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, "lost precision");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, "NaN");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, "minus zero");
+ DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, "overflow");
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, "not a Smi");
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
}
@@ -5447,7 +5489,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
}
}
@@ -5468,14 +5510,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, "wrong instance type");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
} else {
- DeoptimizeIf(below, instr, "wrong instance type");
+ DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr, "wrong instance type");
+ DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
} else {
@@ -5486,12 +5528,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
+ Deoptimizer::kWrongInstanceType);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, "wrong instance type");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -5507,7 +5550,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr, "value mismatch");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}
@@ -5522,7 +5565,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, "instance migration failed");
+ DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
}
@@ -5579,7 +5622,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
__ bind(&success);
@@ -5620,7 +5663,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ jmp(&zero_result, Label::kNear);
// Heap number
@@ -5930,7 +5973,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -6191,17 +6234,17 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr, "undefined");
+ DeoptimizeIf(equal, instr, Deoptimizer::kUndefined);
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr, "null");
+ DeoptimizeIf(equal, instr, Deoptimizer::kNull);
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, "Smi");
+ DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr, "wrong instance type");
+ DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -6216,7 +6259,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
__ bind(&use_cache);
}
@@ -6239,7 +6282,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr, "no cache");
+ DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
}
@@ -6247,7 +6290,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, "wrong map");
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
diff --git a/deps/v8/src/x87/lithium-codegen-x87.h b/deps/v8/src/x87/lithium-codegen-x87.h
index 2f4a8d3111..6b191b96ee 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/x87/lithium-codegen-x87.h
@@ -160,7 +160,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictMode strict_mode() const { return info()->strict_mode(); }
+ LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
@@ -218,27 +218,22 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
- enum EDIState {
- EDI_UNINITIALIZED,
- EDI_CONTAINS_TARGET
- };
-
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- EDIState edi_state);
+ int formal_parameter_count, int arity,
+ LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -413,6 +408,7 @@ class LCodeGen: public LCodeGenBase {
int st(X87Register reg) { return st2idx(ArrayIndex(reg)); }
void pop() {
DCHECK(is_mutable_);
+ USE(is_mutable_);
stack_depth_--;
}
void push(X87Register reg) {
diff --git a/deps/v8/src/x87/lithium-x87.cc b/deps/v8/src/x87/lithium-x87.cc
index 3a52dc4b0f..eeccd1bcff 100644
--- a/deps/v8/src/x87/lithium-x87.cc
+++ b/deps/v8/src/x87/lithium-x87.cc
@@ -285,6 +285,20 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
+void LCallFunction::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ function()->PrintTo(stream);
+ if (hydrogen()->HasVectorAndSlot()) {
+ stream->Add(" (type-feedback-vector ");
+ temp_vector()->PrintTo(stream);
+ stream->Add(" ");
+ temp_slot()->PrintTo(stream);
+ stream->Add(")");
+ }
+}
+
+
void LCallJSFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -1291,7 +1305,15 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(edx);
+ vector = FixedTemp(ebx);
+ }
+
+ LCallFunction* call =
+ new (zone()) LCallFunction(context, function, slot, vector);
return MarkAsCall(DefineFixed(call, eax), instr);
}
@@ -1721,7 +1743,7 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpHoleAndBranch(value);
+ return new (zone()) LCmpHoleAndBranch(value);
}
diff --git a/deps/v8/src/x87/lithium-x87.h b/deps/v8/src/x87/lithium-x87.h
index 5d27fcb7a3..ccd197fc4b 100644
--- a/deps/v8/src/x87/lithium-x87.h
+++ b/deps/v8/src/x87/lithium-x87.h
@@ -1350,7 +1350,7 @@ class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- double value() const { return hydrogen()->DoubleValue(); }
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
};
@@ -1957,19 +1957,25 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- explicit LCallFunction(LOperand* context, LOperand* function) {
+ LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = function;
+ temps_[0] = slot;
+ temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+ void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -2232,7 +2238,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
@@ -2289,7 +2295,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
void PrintDataTo(StringStream* stream) OVERRIDE;
- StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 008b2af63d..41b93d9239 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -597,30 +597,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
fail,
DONT_DO_SMI_CHECK);
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmp(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- fld_d(Operand::StaticVariable(canonical_nan_reference));
- jmp(&have_double_value, Label::kNear);
+ jmp(&done, Label::kNear);
bind(&smi_value);
// Value is a smi. Convert to a double and store.
@@ -630,9 +608,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
push(scratch);
fild_s(Operand(esp, 0));
pop(scratch);
+ bind(&done);
fstp_d(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset));
- bind(&done);
}
@@ -1324,7 +1302,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- DCHECK_EQ(FIELD, 0);
+ DCHECK_EQ(DATA, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
j(not_zero, miss);
@@ -2048,169 +2026,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-Operand ApiParameterOperand(int index) {
- return Operand(esp, index * kPointerSize);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int argc) {
- EnterApiExitFrame(argc);
- if (emit_debug_code()) {
- mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- Operand thunk_last_arg,
- int stack_space,
- Operand return_value_operand,
- Operand* context_restore_operand) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address(isolate());
- ExternalReference level_address =
- ExternalReference::handle_scope_level_address(isolate());
-
- DCHECK(edx.is(function_address));
- // Allocate HandleScope in callee-save registers.
- mov(ebx, Operand::StaticVariable(next_address));
- mov(edi, Operand::StaticVariable(limit_address));
- add(Operand::StaticVariable(level_address), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, eax);
- mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
-
- Label profiler_disabled;
- Label end_profiler_check;
- mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
- cmpb(Operand(eax, 0), 0);
- j(zero, &profiler_disabled);
-
- // Additional parameter is the address of the actual getter function.
- mov(thunk_last_arg, function_address);
- // Call the api function.
- mov(eax, Immediate(thunk_ref));
- call(eax);
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- // Call the api function.
- call(function_address);
- bind(&end_profiler_check);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(1, eax);
- mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label prologue;
- // Load the value from ReturnValue
- mov(eax, return_value_operand);
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- bind(&prologue);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- mov(Operand::StaticVariable(next_address), ebx);
- sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, kInvalidHandleScopeLevel);
- cmp(edi, Operand::StaticVariable(limit_address));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
- cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(isolate()->factory()->the_hole_value()));
- j(not_equal, &promote_scheduled_exception);
- bind(&exception_handled);
-
-#if ENABLE_EXTRA_CHECKS
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = eax;
- Register map = ecx;
-
- JumpIfSmi(return_value, &ok, Label::kNear);
- mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- j(below, &ok, Label::kNear);
-
- CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- j(above_equal, &ok, Label::kNear);
-
- cmp(map, isolate()->factory()->heap_number_map());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->undefined_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->true_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->false_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->null_value());
- j(equal, &ok, Label::kNear);
-
- Abort(kAPICallReturnedInvalidObject);
-
- bind(&ok);
-#endif
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- mov(esi, *context_restore_operand);
- }
- LeaveApiExitFrame(!restore_context);
- ret(stack_space * kPointerSize);
-
- bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
- }
- jmp(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- ExternalReference delete_extensions =
- ExternalReference::delete_handle_scope_extensions(isolate());
- bind(&delete_allocated_handles);
- mov(Operand::StaticVariable(limit_address), edi);
- mov(edi, eax);
- mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate())));
- mov(eax, Immediate(delete_extensions));
- call(eax);
- mov(eax, edi);
- jmp(&leave_exit_frame);
-}
-
-
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
@@ -2551,10 +2366,15 @@ void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
}
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, cell);
mov(value, FieldOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+ Label* miss) {
+ GetWeakValue(value, cell);
JumpIfSmi(value, miss);
}
@@ -2780,6 +2600,18 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
}
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+ int accessor_index,
+ AccessorComponent accessor) {
+ mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
+ LoadInstanceDescriptors(dst, dst);
+ mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+ int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
+ : AccessorPair::kSetterOffset;
+ mov(dst, FieldOperand(dst, offset));
+}
+
+
void MacroAssembler::LookupNumberStringCache(Register object,
Register result,
Register scratch1,
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index b7972651d5..c25203f2d1 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -274,6 +274,7 @@ class MacroAssembler: public Assembler {
}
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// ---------------------------------------------------------------------------
@@ -485,6 +486,8 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register reg) {
@@ -755,24 +758,6 @@ class MacroAssembler: public Assembler {
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
- // Prepares stack to put arguments (aligns and so on). Reserves
- // space for return value if needed (assumes the return value is a handle).
- // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
- // etc. Saves context (esi). If space was reserved for return value then
- // stores the pointer to the reserved slot into esi.
- void PrepareCallApiFunction(int argc);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers ebx, edi and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- Operand thunk_last_arg,
- int stack_space,
- Operand return_value_operand,
- Operand* context_restore_operand);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -1051,10 +1036,6 @@ inline Operand GlobalObjectOperand() {
}
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index);
-
-
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/x87/regexp-macro-assembler-x87.cc
index 9bd08caa2a..cc3f34e420 100644
--- a/deps/v8/src/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/x87/regexp-macro-assembler-x87.cc
@@ -77,12 +77,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerX87::RegExpMacroAssemblerX87(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+RegExpMacroAssemblerX87::RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone,
+ Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
diff --git a/deps/v8/src/x87/regexp-macro-assembler-x87.h b/deps/v8/src/x87/regexp-macro-assembler-x87.h
index 3655bd9626..f893262704 100644
--- a/deps/v8/src/x87/regexp-macro-assembler-x87.h
+++ b/deps/v8/src/x87/regexp-macro-assembler-x87.h
@@ -15,7 +15,8 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerX87(Mode mode, int registers_to_save, Zone* zone);
+ RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
virtual ~RegExpMacroAssemblerX87();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
diff --git a/deps/v8/src/zone-containers.h b/deps/v8/src/zone-containers.h
index b0ff7b6cf1..0a599bcf2b 100644
--- a/deps/v8/src/zone-containers.h
+++ b/deps/v8/src/zone-containers.h
@@ -7,7 +7,9 @@
#include <deque>
#include <list>
+#include <map>
#include <queue>
+#include <set>
#include <stack>
#include <vector>
@@ -27,12 +29,12 @@ class ZoneVector : public std::vector<T, zone_allocator<T>> {
// Constructs a new vector and fills it with {size} elements, each
// constructed via the default constructor.
- ZoneVector(int size, Zone* zone)
+ ZoneVector(size_t size, Zone* zone)
: std::vector<T, zone_allocator<T>>(size, T(), zone_allocator<T>(zone)) {}
// Constructs a new vector and fills it with {size} elements, each
// having the value {def}.
- ZoneVector(int size, T def, Zone* zone)
+ ZoneVector(size_t size, T def, Zone* zone)
: std::vector<T, zone_allocator<T>>(size, def, zone_allocator<T>(zone)) {}
};
@@ -83,6 +85,31 @@ class ZoneStack : public std::stack<T, ZoneDeque<T>> {
};
+// A wrapper subclass for std::set to make it easy to construct one that uses
+// a zone allocator.
+template <typename K, typename Compare = std::less<K>>
+class ZoneSet : public std::set<K, Compare, zone_allocator<K>> {
+ public:
+ // Constructs an empty set.
+ explicit ZoneSet(Zone* zone)
+ : std::set<K, Compare, zone_allocator<K>>(Compare(),
+ zone_allocator<K>(zone)) {}
+};
+
+
+// A wrapper subclass for std::map to make it easy to construct one that uses
+// a zone allocator.
+template <typename K, typename V, typename Compare = std::less<K>>
+class ZoneMap
+ : public std::map<K, V, Compare, zone_allocator<std::pair<K, V>>> {
+ public:
+ // Constructs an empty map.
+ explicit ZoneMap(Zone* zone)
+ : std::map<K, V, Compare, zone_allocator<std::pair<K, V>>>(
+ Compare(), zone_allocator<std::pair<K, V>>(zone)) {}
+};
+
+
// Typedefs to shorten commonly used vectors.
typedef ZoneVector<bool> BoolVector;
typedef ZoneVector<int> IntVector;
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
deleted file mode 100644
index 63efe16818..0000000000
--- a/deps/v8/src/zone-inl.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ZONE_INL_H_
-#define V8_ZONE_INL_H_
-
-#include "src/zone.h"
-
-#ifdef V8_USE_ADDRESS_SANITIZER
- #include <sanitizer/asan_interface.h>
-#else
- #define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void) 0)
-#endif
-
-#include "src/counters.h"
-#include "src/isolate.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-static const int kASanRedzoneBytes = 24; // Must be a multiple of 8.
-
-
-bool Zone::excess_allocation() {
- return segment_bytes_allocated_ > kExcessLimit;
-}
-
-
-void Zone::adjust_segment_bytes_allocated(int delta) {
- segment_bytes_allocated_ += delta;
-}
-
-
-template <typename Config>
-ZoneSplayTree<Config>::~ZoneSplayTree() {
- // Reset the root to avoid unneeded iteration over all tree nodes
- // in the destructor. For a zone-allocated tree, nodes will be
- // freed by the Zone.
- SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
-}
-
-
-void* ZoneObject::operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
-}
-
-inline void* ZoneAllocationPolicy::New(size_t size) {
- DCHECK(zone_);
- return zone_->New(static_cast<int>(size));
-}
-
-
-template <typename T>
-void* ZoneList<T>::operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
-}
-
-
-template <typename T>
-void* ZoneSplayTree<T>::operator new(size_t size, Zone* zone) {
- return zone->New(static_cast<int>(size));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ZONE_INL_H_
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index eb2e532a22..5a7245bd44 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -2,14 +2,45 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <string.h>
+#include "src/zone.h"
+
+#include <cstring>
#include "src/v8.h"
-#include "src/zone-inl.h"
+
+#ifdef V8_USE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif // V8_USE_ADDRESS_SANITIZER
namespace v8 {
namespace internal {
+namespace {
+
+#if V8_USE_ADDRESS_SANITIZER
+
+const size_t kASanRedzoneBytes = 24; // Must be a multiple of 8.
+
+#else
+
+#define ASAN_POISON_MEMORY_REGION(start, size) \
+ do { \
+ USE(start); \
+ USE(size); \
+ } while (false)
+
+#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
+ do { \
+ USE(start); \
+ USE(size); \
+ } while (false)
+
+const size_t kASanRedzoneBytes = 0;
+
+#endif // V8_USE_ADDRESS_SANITIZER
+
+} // namespace
+
// Segments represent chunks of memory: They have starting address
// (encoded in the this pointer) and a size in bytes. Segments are
@@ -19,39 +50,35 @@ namespace internal {
class Segment {
public:
- void Initialize(Segment* next, int size) {
+ void Initialize(Segment* next, size_t size) {
next_ = next;
size_ = size;
}
Segment* next() const { return next_; }
- void clear_next() { next_ = NULL; }
+ void clear_next() { next_ = nullptr; }
- int size() const { return size_; }
- int capacity() const { return size_ - sizeof(Segment); }
+ size_t size() const { return size_; }
+ size_t capacity() const { return size_ - sizeof(Segment); }
Address start() const { return address(sizeof(Segment)); }
Address end() const { return address(size_); }
private:
// Computes the address of the nth byte in this segment.
- Address address(int n) const {
- return Address(this) + n;
- }
+ Address address(size_t n) const { return Address(this) + n; }
Segment* next_;
- int size_;
+ size_t size_;
};
-Zone::Zone(Isolate* isolate)
+Zone::Zone()
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
limit_(0),
- segment_head_(NULL),
- isolate_(isolate) {
-}
+ segment_head_(nullptr) {}
Zone::~Zone() {
@@ -62,7 +89,7 @@ Zone::~Zone() {
}
-void* Zone::New(int size) {
+void* Zone::New(size_t size) {
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -77,24 +104,16 @@ void* Zone::New(int size) {
// Check if the requested size is available without expanding.
Address result = position_;
- int size_with_redzone =
-#ifdef V8_USE_ADDRESS_SANITIZER
- size + kASanRedzoneBytes;
-#else
- size;
-#endif
-
- if (size_with_redzone > limit_ - position_) {
- result = NewExpand(size_with_redzone);
+ const size_t size_with_redzone = size + kASanRedzoneBytes;
+ if (limit_ < position_ + size_with_redzone) {
+ result = NewExpand(size_with_redzone);
} else {
- position_ += size_with_redzone;
+ position_ += size_with_redzone;
}
-#ifdef V8_USE_ADDRESS_SANITIZER
Address redzone_position = result + size;
DCHECK(redzone_position + kASanRedzoneBytes == position_);
ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
-#endif
// Check that the result has the proper alignment and return it.
DCHECK(IsAddressAligned(result, kAlignment, 0));
@@ -110,17 +129,17 @@ void Zone::DeleteAll() {
#endif
// Find a segment with a suitable size to keep around.
- Segment* keep = NULL;
+ Segment* keep = nullptr;
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every segment except the one we wish to keep.
- for (Segment* current = segment_head_; current != NULL; ) {
+ for (Segment* current = segment_head_; current;) {
Segment* next = current->next();
- if (keep == NULL && current->size() <= kMaximumKeptSegmentSize) {
+ if (!keep && current->size() <= kMaximumKeptSegmentSize) {
// Unlink the segment we wish to keep from the list.
keep = current;
keep->clear_next();
} else {
- int size = current->size();
+ size_t size = current->size();
#ifdef DEBUG
// Un-poison first so the zapping doesn't trigger ASan complaints.
ASAN_UNPOISON_MEMORY_REGION(current, size);
@@ -136,7 +155,7 @@ void Zone::DeleteAll() {
// variables 'position' and 'limit' to prepare for future allocate
// attempts. Otherwise, we must clear the position and limit to
// force a new segment to be allocated on demand.
- if (keep != NULL) {
+ if (keep) {
Address start = keep->start();
position_ = RoundUp(start, kAlignment);
limit_ = keep->end();
@@ -162,9 +181,9 @@ void Zone::DeleteKeptSegment() {
static const unsigned char kZapDeadByte = 0xcd;
#endif
- DCHECK(segment_head_ == NULL || segment_head_->next() == NULL);
- if (segment_head_ != NULL) {
- int size = segment_head_->size();
+ DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
+ if (segment_head_ != nullptr) {
+ size_t size = segment_head_->size();
#ifdef DEBUG
// Un-poison first so the zapping doesn't trigger ASan complaints.
ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
@@ -172,7 +191,7 @@ void Zone::DeleteKeptSegment() {
memset(segment_head_, kZapDeadByte, size);
#endif
DeleteSegment(segment_head_, size);
- segment_head_ = NULL;
+ segment_head_ = nullptr;
}
DCHECK(segment_bytes_allocated_ == 0);
@@ -181,10 +200,10 @@ void Zone::DeleteKeptSegment() {
// Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(int size) {
+Segment* Zone::NewSegment(size_t size) {
Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
- adjust_segment_bytes_allocated(size);
- if (result != NULL) {
+ segment_bytes_allocated_ += size;
+ if (result != nullptr) {
result->Initialize(segment_head_, size);
segment_head_ = result;
}
@@ -193,51 +212,50 @@ Segment* Zone::NewSegment(int size) {
// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, int size) {
- adjust_segment_bytes_allocated(-size);
+void Zone::DeleteSegment(Segment* segment, size_t size) {
+ segment_bytes_allocated_ -= size;
Malloced::Delete(segment);
}
-Address Zone::NewExpand(int size) {
+Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
- DCHECK(size == RoundDown(size, kAlignment));
- DCHECK(size > limit_ - position_);
+ DCHECK_EQ(size, RoundDown(size, kAlignment));
+ DCHECK_LT(limit_, position_ + size);
// Compute the new segment size. We use a 'high water mark'
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
Segment* head = segment_head_;
- const size_t old_size = (head == NULL) ? 0 : head->size();
+ const size_t old_size = (head == nullptr) ? 0 : head->size();
static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
const size_t new_size_no_overhead = size + (old_size << 1);
size_t new_size = kSegmentOverhead + new_size_no_overhead;
- const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size);
+ const size_t min_new_size = kSegmentOverhead + size;
// Guard against integer overflow.
- if (new_size_no_overhead < static_cast<size_t>(size) ||
- new_size < static_cast<size_t>(kSegmentOverhead)) {
+ if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
V8::FatalProcessOutOfMemory("Zone");
- return NULL;
+ return nullptr;
}
- if (new_size < static_cast<size_t>(kMinimumSegmentSize)) {
+ if (new_size < kMinimumSegmentSize) {
new_size = kMinimumSegmentSize;
- } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) {
+ } else if (new_size > kMaximumSegmentSize) {
// Limit the size of new segments to avoid growing the segment size
// exponentially, thus putting pressure on contiguous virtual address space.
// All the while making sure to allocate a segment large enough to hold the
// requested size.
- new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize));
+ new_size = Max(min_new_size, kMaximumSegmentSize);
}
if (new_size > INT_MAX) {
V8::FatalProcessOutOfMemory("Zone");
- return NULL;
+ return nullptr;
}
- Segment* segment = NewSegment(static_cast<int>(new_size));
- if (segment == NULL) {
+ Segment* segment = NewSegment(new_size);
+ if (segment == nullptr) {
V8::FatalProcessOutOfMemory("Zone");
- return NULL;
+ return nullptr;
}
// Recompute 'top' and 'limit' based on the new segment.
@@ -246,15 +264,12 @@ Address Zone::NewExpand(int size) {
// Check for address overflow.
// (Should not happen since the segment is guaranteed to accomodate
// size bytes + header and alignment padding)
- if (reinterpret_cast<uintptr_t>(position_)
- < reinterpret_cast<uintptr_t>(result)) {
- V8::FatalProcessOutOfMemory("Zone");
- return NULL;
- }
+ DCHECK_GE(reinterpret_cast<uintptr_t>(position_),
+ reinterpret_cast<uintptr_t>(result));
limit_ = segment->end();
DCHECK(position_ <= limit_);
return result;
}
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index 6f552b6524..a3511cdaa4 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -17,35 +17,34 @@
namespace v8 {
namespace internal {
-
+// Forward declarations.
class Segment;
-class Isolate;
+
// The Zone supports very fast allocation of small chunks of
// memory. The chunks cannot be deallocated individually, but instead
// the Zone supports deallocating all chunks in one fast
// operation. The Zone is used to hold temporary data structures like
// the abstract syntax tree, which is deallocated after compilation.
-
+//
// Note: There is no need to initialize the Zone; the first time an
// allocation is attempted, a segment of memory will be requested
// through a call to malloc().
-
+//
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
-
-class Zone {
+class Zone FINAL {
public:
- explicit Zone(Isolate* isolate);
+ Zone();
~Zone();
+
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
- void* New(int size);
+ void* New(size_t size);
template <typename T>
- T* NewArray(int length) {
- CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) >
- length);
+ T* NewArray(size_t length) {
+ DCHECK_LT(length, std::numeric_limits<size_t>::max() / sizeof(T));
return static_cast<T*>(New(length * sizeof(T)));
}
@@ -59,59 +58,55 @@ class Zone {
// Returns true if more memory has been allocated in zones than
// the limit allows.
- inline bool excess_allocation();
-
- inline void adjust_segment_bytes_allocated(int delta);
-
- inline unsigned allocation_size() const { return allocation_size_; }
+ bool excess_allocation() const {
+ return segment_bytes_allocated_ > kExcessLimit;
+ }
- inline Isolate* isolate() const { return isolate_; }
+ size_t allocation_size() const { return allocation_size_; }
private:
- friend class Isolate;
-
// All pointers returned from New() have this alignment. In addition, if the
// object being allocated has a size that is divisible by 8 then its alignment
// will be 8. ASan requires 8-byte alignment.
#ifdef V8_USE_ADDRESS_SANITIZER
- static const int kAlignment = 8;
+ static const size_t kAlignment = 8;
STATIC_ASSERT(kPointerSize <= 8);
#else
- static const int kAlignment = kPointerSize;
+ static const size_t kAlignment = kPointerSize;
#endif
// Never allocate segments smaller than this size in bytes.
- static const int kMinimumSegmentSize = 8 * KB;
+ static const size_t kMinimumSegmentSize = 8 * KB;
// Never allocate segments larger than this size in bytes.
- static const int kMaximumSegmentSize = 1 * MB;
+ static const size_t kMaximumSegmentSize = 1 * MB;
// Never keep segments larger than this size in bytes around.
- static const int kMaximumKeptSegmentSize = 64 * KB;
+ static const size_t kMaximumKeptSegmentSize = 64 * KB;
// Report zone excess when allocation exceeds this limit.
- static const int kExcessLimit = 256 * MB;
+ static const size_t kExcessLimit = 256 * MB;
// The number of bytes allocated in this zone so far.
- unsigned allocation_size_;
+ size_t allocation_size_;
// The number of bytes allocated in segments. Note that this number
// includes memory allocated from the OS but not yet allocated from
// the zone.
- int segment_bytes_allocated_;
+ size_t segment_bytes_allocated_;
// Expand the Zone to hold at least 'size' more bytes and allocate
// the bytes. Returns the address of the newly allocated chunk of
// memory in the Zone. Should only be called if there isn't enough
// room in the Zone already.
- Address NewExpand(int size);
+ Address NewExpand(size_t size);
// Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment.
- INLINE(Segment* NewSegment(int size));
+ inline Segment* NewSegment(size_t size);
// Deletes the given segment. Does not touch the segment chain.
- INLINE(void DeleteSegment(Segment* segment, int size));
+ inline void DeleteSegment(Segment* segment, size_t size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
@@ -120,7 +115,6 @@ class Zone {
Address limit_;
Segment* segment_head_;
- Isolate* isolate_;
};
@@ -129,7 +123,7 @@ class Zone {
class ZoneObject {
public:
// Allocate a new ZoneObject of 'size' bytes in the Zone.
- INLINE(void* operator new(size_t size, Zone* zone));
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
// Ideally, the delete operator should be private instead of
// public, but unfortunately the compiler sometimes synthesizes
@@ -146,12 +140,12 @@ class ZoneObject {
// The ZoneScope is used to automatically call DeleteAll() on a
// Zone when the ZoneScope is destroyed (i.e. goes out of scope)
-struct ZoneScope {
+class ZoneScope FINAL {
public:
explicit ZoneScope(Zone* zone) : zone_(zone) { }
~ZoneScope() { zone_->DeleteAll(); }
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
private:
Zone* zone_;
@@ -160,12 +154,12 @@ struct ZoneScope {
// The ZoneAllocationPolicy is used to specialize generic data
// structures to allocate themselves and their elements in the Zone.
-struct ZoneAllocationPolicy {
+class ZoneAllocationPolicy FINAL {
public:
explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
- INLINE(void* New(size_t size));
- INLINE(static void Delete(void *pointer)) { }
- Zone* zone() { return zone_; }
+ void* New(size_t size) { return zone()->New(size); }
+ static void Delete(void* pointer) {}
+ Zone* zone() const { return zone_; }
private:
Zone* zone_;
@@ -176,15 +170,15 @@ struct ZoneAllocationPolicy {
// elements. The list itself and all its elements are allocated in the
// Zone. ZoneLists cannot be deleted individually; you can delete all
// objects in the Zone by calling Zone::DeleteAll().
-template<typename T>
-class ZoneList: public List<T, ZoneAllocationPolicy> {
+template <typename T>
+class ZoneList FINAL : public List<T, ZoneAllocationPolicy> {
public:
// Construct a new ZoneList with the given capacity; the length is
// always zero. The capacity must be non-negative.
ZoneList(int capacity, Zone* zone)
: List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) { }
- INLINE(void* operator new(size_t size, Zone* zone));
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
// Construct a new ZoneList by copying the elements of the given ZoneList.
ZoneList(const ZoneList<T>& other, Zone* zone)
@@ -195,27 +189,27 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
// We add some convenience wrappers so that we can pass in a Zone
// instead of a (less convenient) ZoneAllocationPolicy.
- INLINE(void Add(const T& element, Zone* zone)) {
+ void Add(const T& element, Zone* zone) {
List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
}
- INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other, Zone* zone)) {
+ void AddAll(const List<T, ZoneAllocationPolicy>& other, Zone* zone) {
List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
}
- INLINE(void AddAll(const Vector<T>& other, Zone* zone)) {
+ void AddAll(const Vector<T>& other, Zone* zone) {
List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
}
- INLINE(void InsertAt(int index, const T& element, Zone* zone)) {
+ void InsertAt(int index, const T& element, Zone* zone) {
List<T, ZoneAllocationPolicy>::InsertAt(index, element,
ZoneAllocationPolicy(zone));
}
- INLINE(Vector<T> AddBlock(T value, int count, Zone* zone)) {
+ Vector<T> AddBlock(T value, int count, Zone* zone) {
return List<T, ZoneAllocationPolicy>::AddBlock(value, count,
ZoneAllocationPolicy(zone));
}
- INLINE(void Allocate(int length, Zone* zone)) {
+ void Allocate(int length, Zone* zone) {
List<T, ZoneAllocationPolicy>::Allocate(length, ZoneAllocationPolicy(zone));
}
- INLINE(void Initialize(int capacity, Zone* zone)) {
+ void Initialize(int capacity, Zone* zone) {
List<T, ZoneAllocationPolicy>::Initialize(capacity,
ZoneAllocationPolicy(zone));
}
@@ -229,13 +223,18 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
// different configurations of a concrete splay tree (see splay-tree.h).
// The tree itself and all its elements are allocated in the Zone.
template <typename Config>
-class ZoneSplayTree: public SplayTree<Config, ZoneAllocationPolicy> {
+class ZoneSplayTree FINAL : public SplayTree<Config, ZoneAllocationPolicy> {
public:
explicit ZoneSplayTree(Zone* zone)
: SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
- ~ZoneSplayTree();
+ ~ZoneSplayTree() {
+ // Reset the root to avoid unneeded iteration over all tree nodes
+ // in the destructor. For a zone-allocated tree, nodes will be
+ // freed by the Zone.
+ SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
+ }
- INLINE(void* operator new(size_t size, Zone* zone));
+ void* operator new(size_t size, Zone* zone) { return zone->New(size); }
void operator delete(void* pointer) { UNREACHABLE(); }
void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
@@ -244,6 +243,7 @@ class ZoneSplayTree: public SplayTree<Config, ZoneAllocationPolicy> {
typedef TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ZONE_H_
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index 29e0c37e44..6607bef8cc 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -186,7 +186,7 @@ class BenchmarksTestSuite(testsuite.TestSuite):
# Both --nocrankshaft and --stressopt are very slow. Add TF but without
# always opt to match the way the benchmarks are run for performance
# testing.
- return [[], ["--turbo-asm", "--turbo-filter=*"]]
+ return [[], ["--turbo-filter=*"]]
def GetSuite(name, root):
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 170aa5a8a5..08fb01bc29 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -34,6 +34,10 @@
#include "test/cctest/profiler-extension.h"
#include "test/cctest/trace-extension.h"
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+#include "src/startup-data-util.h"
+#endif // V8_USE_EXTERNAL_STARTUP_DATA
+
#if V8_OS_WIN
#include <windows.h> // NOLINT
#if V8_CC_MSVC
@@ -166,6 +170,9 @@ int main(int argc, char* argv[]) {
v8::V8::InitializePlatform(platform);
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::Initialize();
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+ v8::StartupDataHandler startup_data(argv[0], NULL, NULL);
+#endif
CcTestArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 4d1c467ea0..1b851e44cf 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -59,7 +59,6 @@
'compiler/test-codegen-deopt.cc',
'compiler/test-control-reducer.cc',
'compiler/test-gap-resolver.cc',
- 'compiler/test-graph-reducer.cc',
'compiler/test-graph-visualizer.cc',
'compiler/test-instruction.cc',
'compiler/test-js-context-specialization.cc',
@@ -74,6 +73,7 @@
'compiler/test-node-cache.cc',
'compiler/test-node.cc',
'compiler/test-operator.cc',
+ 'compiler/test-osr.cc',
'compiler/test-pipeline.cc',
'compiler/test-representation-change.cc',
'compiler/test-run-deopt.cc',
@@ -87,10 +87,7 @@
'compiler/test-run-properties.cc',
'compiler/test-run-stackcheck.cc',
'compiler/test-run-variables.cc',
- 'compiler/test-schedule.cc',
- 'compiler/test-scheduler.cc',
'compiler/test-simplified-lowering.cc',
- 'compiler/test-typer.cc',
'cctest.cc',
'gay-fixed.cc',
'gay-precision.cc',
@@ -100,12 +97,13 @@
'test-accessors.cc',
'test-alloc.cc',
'test-api.cc',
+ 'test-api.h',
+ 'test-api-interceptors.cc',
'test-ast.cc',
'test-atomicops.cc',
'test-bignum.cc',
'test-bignum-dtoa.cc',
'test-bit-vector.cc',
- 'test-checks.cc',
'test-circular-queue.cc',
'test-compiler.cc',
'test-constantpool.cc',
@@ -113,7 +111,6 @@
'test-cpu-profiler.cc',
'test-date.cc',
'test-debug.cc',
- 'test-declarative-accessors.cc',
'test-decls.cc',
'test-deoptimization.cc',
'test-dictionary.cc',
@@ -140,6 +137,7 @@
'test-microtask-delivery.cc',
'test-mark-compact.cc',
'test-mementos.cc',
+ 'test-migrations.cc',
'test-object-observe.cc',
'test-ordered-hash-table.cc',
'test-parsing.cc',
@@ -168,7 +166,9 @@
'test-weakmaps.cc',
'test-weaksets.cc',
'test-weaktypedarrays.cc',
- 'trace-extension.cc'
+ 'trace-extension.cc',
+ '../../src/startup-data-util.h',
+ '../../src/startup-data-util.cc'
],
'conditions': [
['v8_target_arch=="ia32"', {
@@ -212,6 +212,20 @@
'test-js-arm64-variables.cc'
],
}],
+ ['v8_target_arch=="ppc"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ 'test-assembler-ppc.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-ppc.cc'
+ ],
+ }],
+ ['v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc64) ###
+ 'test-assembler-ppc.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-ppc.cc'
+ ],
+ }],
['v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'test-assembler-mips.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index a8239d26fb..e111438cdf 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -99,6 +99,7 @@ class TestHeap : public i::Heap {
using i::Heap::AllocateJSObjectFromMap;
using i::Heap::AllocateMap;
using i::Heap::CopyCode;
+ using i::Heap::kInitialNumberStringCacheSize;
};
@@ -346,6 +347,11 @@ static inline v8::Local<v8::String> v8_str(const char* x) {
}
+static inline v8::Local<v8::Symbol> v8_symbol(const char* name) {
+ return v8::Symbol::New(v8::Isolate::GetCurrent(), v8_str(name));
+}
+
+
static inline v8::Local<v8::Script> v8_compile(const char* x) {
return v8::Script::Compile(v8_str(x));
}
@@ -383,6 +389,14 @@ static inline v8::Local<v8::Value> CompileRun(const char* source) {
}
+// Compiles source as an ES6 module.
+static inline v8::Local<v8::Value> CompileRunModule(const char* source) {
+ v8::ScriptCompiler::Source script_source(v8_str(source));
+ return v8::ScriptCompiler::CompileModule(v8::Isolate::GetCurrent(),
+ &script_source)->Run();
+}
+
+
static inline v8::Local<v8::Value> CompileRun(v8::Local<v8::String> source) {
return v8::Script::Compile(source)->Run();
}
@@ -439,7 +453,7 @@ static inline void ExpectString(const char* code, const char* expected) {
v8::Local<v8::Value> result = CompileRun(code);
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
- CHECK_EQ(expected, *utf8);
+ CHECK_EQ(0, strcmp(expected, *utf8));
}
@@ -485,27 +499,39 @@ static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
v8::internal::AllocationResult allocation =
space->AllocateRaw(v8::internal::Page::kMaxRegularHeapObjectSize);
if (allocation.IsRetry()) return false;
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), v8::internal::Page::kMaxRegularHeapObjectSize);
+ v8::internal::HeapObject* free_space = NULL;
+ CHECK(allocation.To(&free_space));
+ space->heap()->CreateFillerObjectAt(
+ free_space->address(), v8::internal::Page::kMaxRegularHeapObjectSize);
return true;
}
-static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
- int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
+// Helper function that simulates a fill new-space in the heap.
+static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
+ int extra_bytes) {
+ int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
- if (new_linear_size > 0) {
- // Fill up the current page.
- v8::internal::AllocationResult allocation =
- space->AllocateRaw(new_linear_size);
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), new_linear_size);
+ CHECK(space_remaining >= extra_bytes);
+ int new_linear_size = space_remaining - extra_bytes;
+ if (new_linear_size == 0) return;
+ v8::internal::AllocationResult allocation =
+ space->AllocateRaw(new_linear_size);
+ v8::internal::HeapObject* free_space = NULL;
+ CHECK(allocation.To(&free_space));
+ space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
+}
+
+
+static inline void FillCurrentPage(v8::internal::NewSpace* space) {
+ AllocateAllButNBytes(space, 0);
+}
+
+
+static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
+ FillCurrentPage(space);
+ while (FillUpOnePage(space)) {
}
- // Fill up all remaining pages.
- while (FillUpOnePage(space))
- ;
}
@@ -544,7 +570,7 @@ class HeapObjectsTracker {
public:
HeapObjectsTracker() {
heap_profiler_ = i::Isolate::Current()->heap_profiler();
- CHECK_NE(NULL, heap_profiler_);
+ CHECK_NOT_NULL(heap_profiler_);
heap_profiler_->StartHeapObjectsTracking(true);
}
@@ -576,7 +602,7 @@ class InitializedHandleScope {
class HandleAndZoneScope : public InitializedHandleScope {
public:
- HandleAndZoneScope() : main_zone_(main_isolate()) {}
+ HandleAndZoneScope() {}
// Prefixing the below with main_ reduces a lot of naming clashes.
i::Zone* main_zone() { return &main_zone_; }
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 95fb839aa6..c58b1f0204 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -64,6 +64,9 @@
# are actually 13 * 38 * 5 * 128 = 316160 individual tests hidden here.
'test-parsing/ParserSync': [PASS, NO_VARIANTS],
+ # Modules are busted
+ 'test-parsing/ExportsMaybeAssigned': [SKIP],
+
# This tests only the type system, so there is no point in running several
# variants.
'test-hydrogen-types/*': [PASS, NO_VARIANTS],
@@ -91,32 +94,13 @@
# BUG(3742).
'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
- # Support for %GetFrameDetails is missing and requires checkpoints.
- 'test-api/Regress385349': [PASS, NO_VARIANTS],
- 'test-debug/DebuggerStatement': [PASS, NO_VARIANTS],
- 'test-debug/DebuggerStatementBreakpoint': [PASS, NO_VARIANTS],
- 'test-debug/DebugEvaluateWithCodeGenerationDisallowed': [PASS, NO_VARIANTS],
+ # TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
'test-debug/DebugStepNatives': [PASS, NO_VARIANTS],
- 'test-debug/DebugStepFunctionCall': [PASS, NO_VARIANTS],
'test-debug/DebugStepFunctionApply': [PASS, NO_VARIANTS],
- 'test-debug/ScriptNameAndData': [PASS, NO_VARIANTS],
- 'test-debug/ContextData': [PASS, NO_VARIANTS],
- 'test-debug/DebugBreakInMessageHandler': [PASS, NO_VARIANTS],
- 'test-debug/CallFunctionInDebugger': [PASS, NO_VARIANTS],
- 'test-debug/CallingContextIsNotDebugContext': [PASS, NO_VARIANTS],
- 'test-debug/DebugEventContext': [PASS, NO_VARIANTS],
- 'test-debug/DebugBreakInline': [PASS, NO_VARIANTS],
- 'test-debug/BreakMessageWhenMessageHandlerIsReset': [PASS, NO_VARIANTS],
- 'test-debug/DebugBreak': [PASS, NO_VARIANTS],
- 'test-debug/DebugBreakFunctionApply': [PASS, NO_VARIANTS],
- 'test-debug/DebugBreakStackInspection': [PASS, NO_VARIANTS],
- 'test-debug/DeoptimizeDuringDebugBreak': [PASS, NO_VARIANTS],
- 'test-debug/DisableBreak': [PASS, NO_VARIANTS],
- 'test-debug/NoDebugBreakInAfterCompileMessageHandler': [PASS, NO_VARIANTS],
- 'test-debug/RegExpDebugBreak': [PASS, NO_VARIANTS],
-
- # TODO(titzer): Triggers bug in late control reduction.
- 'test-run-inlining/InlineLoopGuardedEmpty': [SKIP],
+ 'test-debug/DebugStepFunctionCall': [PASS, NO_VARIANTS],
+
+ # TODO(jochen): Reenable after we removed the CHECK() from the marking queue.
+ 'test-mark-compact/MarkingDeque': [SKIP],
############################################################################
# Slow tests.
@@ -160,6 +144,7 @@
# Pass but take too long with the simulator.
'test-api/ExternalArrays': [PASS, TIMEOUT],
'test-api/Threading1': [SKIP],
+ 'test-api/Threading2': [SKIP],
}], # 'arch == arm64 and simulator_run == True'
['arch == arm64 and mode == debug and simulator_run == True', {
@@ -178,13 +163,10 @@
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
- 'test-lockers/MultithreadedParallelIsolates': [SKIP],
}], # 'asan == True'
##############################################################################
['no_snap == True', {
- # BUG(3215)
- 'test-lockers/MultithreadedParallelIsolates': [PASS, FAIL, TIMEOUT],
}], # 'no_snap == True'
##############################################################################
@@ -204,9 +186,6 @@
# BUG(3005).
'test-alloc/CodeRange': [PASS, FAIL],
- # BUG(3215). Crashes on windows.
- 'test-lockers/MultithreadedParallelIsolates': [SKIP],
-
# BUG(3331). Fails on windows.
'test-heap/NoWeakHashTableLeakWithIncrementalMarking': [SKIP],
@@ -324,7 +303,6 @@
# NaCl builds have problems with threaded tests since Pepper_28.
# V8 Issue 2786
'test-api/Threading1': [SKIP],
- 'test-lockers/MultithreadedParallelIsolates': [SKIP],
'test-lockers/ExtensionsRegistration': [SKIP],
# These tests fail as there is no /tmp directory in Native Client.
@@ -423,4 +401,9 @@
'test-accessors/Gc' : [SKIP],
}], # 'arch == nacl_ia32 or arch == nacl_x64'
+
+['arch == ppc64', {
+ #issue 2857
+ 'test-log/EquivalenceOfLoggingAndTraversal' : [SKIP],
+}], # 'arch == ppc64'
]
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index cad171e60c..ffafaf0803 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -207,7 +207,7 @@ class CallHelper {
Simulator::CallArgument::End()};
return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
}
-#elif USE_SIMULATOR && V8_TARGET_ARCH_MIPS64
+#elif USE_SIMULATOR && (V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64)
uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0,
int64_t p3 = 0, int64_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);
@@ -243,7 +243,8 @@ class CallHelper {
ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
ParameterTraits<P4>::Cast(p4)));
}
-#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS)
+#elif USE_SIMULATOR && \
+ (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_PPC)
uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
int32_t p3 = 0, int32_t p4 = 0) {
Simulator* simulator = Simulator::current(isolate_);
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index 5311001714..d05b282293 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -141,7 +141,7 @@ TEST(CompareWrapper) {
CompareWrapper wFloat64LessThanOrEqual(IrOpcode::kFloat64LessThanOrEqual);
// Check NaN handling.
- double nan = v8::base::OS::nan_value();
+ double nan = std::numeric_limits<double>::quiet_NaN();
double inf = V8_INFINITY;
CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 0.0));
CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 1.0));
@@ -373,9 +373,9 @@ void Int32BinopInputShapeTester::RunRight(
TEST(ParametersEqual) {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p1 = m.Parameter(1);
- CHECK_NE(NULL, p1);
+ CHECK(p1);
Node* p0 = m.Parameter(0);
- CHECK_NE(NULL, p0);
+ CHECK(p0);
CHECK_EQ(p0, m.Parameter(0));
CHECK_EQ(p1, m.Parameter(1));
}
@@ -561,7 +561,7 @@ TEST(RunBinopTester) {
Float64BinopTester bt(&m);
bt.AddReturn(bt.param0);
- FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 9.0)); }
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(*i, 9.0)); }
}
{
@@ -569,7 +569,7 @@ TEST(RunBinopTester) {
Float64BinopTester bt(&m);
bt.AddReturn(bt.param1);
- FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(-11.25, *i)); }
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); }
}
}
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 283d533974..d45d1fdc33 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -32,7 +32,7 @@ class MachineAssemblerTester : public HandleAndZoneScope,
main_isolate(),
MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4)),
MachineAssembler(
- new (main_zone()) Graph(main_zone()),
+ main_isolate(), new (main_zone()) Graph(main_zone()),
MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4),
kMachPtr, flags) {}
@@ -68,8 +68,8 @@ class MachineAssemblerTester : public HandleAndZoneScope,
Schedule* schedule = this->Export();
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
- code_ =
- Pipeline::GenerateCodeForTesting(call_descriptor, graph, schedule);
+ code_ = Pipeline::GenerateCodeForTesting(this->isolate(), call_descriptor,
+ graph, schedule);
}
return this->code_.ToHandleChecked()->entry();
}
@@ -332,6 +332,16 @@ class Int32BinopInputShapeTester {
void RunLeft(RawMachineAssemblerTester<int32_t>* m);
void RunRight(RawMachineAssemblerTester<int32_t>* m);
};
+
+// TODO(bmeurer): Drop this crap once we switch to GTest/Gmock.
+static inline void CheckDoubleEq(volatile double x, volatile double y) {
+ if (std::isnan(x)) {
+ CHECK(std::isnan(y));
+ } else {
+ CHECK_EQ(x, y);
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 7e16eead38..440043cb94 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -36,7 +36,7 @@ class FunctionTester : public InitializedHandleScope {
const uint32_t supported_flags = CompilationInfo::kContextSpecializing |
CompilationInfo::kInliningEnabled |
CompilationInfo::kTypingEnabled;
- CHECK_EQ(0, flags_ & ~supported_flags);
+ CHECK_EQ(0u, flags_ & ~supported_flags);
}
explicit FunctionTester(Graph* graph)
@@ -154,7 +154,7 @@ class FunctionTester : public InitializedHandleScope {
#if V8_TURBOFAN_TARGET
CompilationInfoWithZone info(function);
- CHECK(Parser::Parse(&info));
+ CHECK(Parser::ParseStatic(&info));
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
if (flags_ & CompilationInfo::kContextSpecializing) {
info.MarkAsContextSpecializing();
@@ -210,7 +210,7 @@ class FunctionTester : public InitializedHandleScope {
CHECK(Pipeline::SupportedTarget());
CompilationInfoWithZone info(function);
- CHECK(Parser::Parse(&info));
+ CHECK(Parser::ParseStatic(&info));
info.SetOptimizing(BailoutId::None(),
Handle<Code>(function->shared()->code()));
CHECK(Compiler::Analyze(&info));
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.cc b/deps/v8/test/cctest/compiler/graph-builder-tester.cc
index b0f470b01d..38bc633711 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.cc
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.cc
@@ -11,15 +11,17 @@ namespace v8 {
namespace internal {
namespace compiler {
-MachineCallHelper::MachineCallHelper(Zone* zone, MachineSignature* machine_sig)
- : CallHelper(zone->isolate(), machine_sig),
+MachineCallHelper::MachineCallHelper(Isolate* isolate,
+ MachineSignature* machine_sig)
+ : CallHelper(isolate, machine_sig),
parameters_(NULL),
+ isolate_(isolate),
graph_(NULL) {}
void MachineCallHelper::InitParameters(GraphBuilder* builder,
CommonOperatorBuilder* common) {
- DCHECK_EQ(NULL, parameters_);
+ DCHECK(!parameters_);
graph_ = builder->graph();
int param_count = static_cast<int>(parameter_count());
if (param_count == 0) return;
@@ -37,14 +39,14 @@ byte* MachineCallHelper::Generate() {
Zone* zone = graph_->zone();
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, machine_sig_);
- code_ = Pipeline::GenerateCodeForTesting(desc, graph_);
+ code_ = Pipeline::GenerateCodeForTesting(isolate_, desc, graph_);
}
return code_.ToHandleChecked()->entry();
}
Node* MachineCallHelper::Parameter(size_t index) {
- DCHECK_NE(NULL, parameters_);
+ DCHECK(parameters_);
DCHECK(index < parameter_count());
return parameters_[index];
}
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index 772de4d11c..9d71c85018 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -22,7 +22,8 @@ namespace compiler {
// A class that just passes node creation on to the Graph.
class DirectGraphBuilder : public GraphBuilder {
public:
- explicit DirectGraphBuilder(Graph* graph) : GraphBuilder(graph) {}
+ DirectGraphBuilder(Isolate* isolate, Graph* graph)
+ : GraphBuilder(isolate, graph) {}
virtual ~DirectGraphBuilder() {}
protected:
@@ -35,7 +36,7 @@ class DirectGraphBuilder : public GraphBuilder {
class MachineCallHelper : public CallHelper {
public:
- MachineCallHelper(Zone* zone, MachineSignature* machine_sig);
+ MachineCallHelper(Isolate* isolate, MachineSignature* machine_sig);
Node* Parameter(size_t index);
@@ -51,6 +52,7 @@ class MachineCallHelper : public CallHelper {
private:
Node** parameters_;
// TODO(dcarney): shouldn't need graph stored.
+ Isolate* isolate_;
Graph* graph_;
MaybeHandle<Code> code_;
};
@@ -88,12 +90,12 @@ class GraphBuilderTester
MachineType p4 = kMachNone)
: GraphAndBuilders(main_zone()),
MachineCallHelper(
- main_zone(),
+ main_isolate(),
MakeMachineSignature(
main_zone(), ReturnValueTraits<ReturnType>::Representation(),
p0, p1, p2, p3, p4)),
- SimplifiedGraphBuilder(main_graph_, &main_common_, &main_machine_,
- &main_simplified_) {
+ SimplifiedGraphBuilder(main_isolate(), main_graph_, &main_common_,
+ &main_machine_, &main_simplified_) {
Begin(static_cast<int>(parameter_count()));
InitParameters(this, &main_common_);
}
diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
index baa03fbb8f..e65ba2e0df 100644
--- a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
@@ -11,9 +11,9 @@ namespace internal {
namespace compiler {
SimplifiedGraphBuilder::SimplifiedGraphBuilder(
- Graph* graph, CommonOperatorBuilder* common,
+ Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, SimplifiedOperatorBuilder* simplified)
- : GraphBuilder(graph),
+ : GraphBuilder(isolate, graph),
effect_(NULL),
return_(NULL),
common_(common),
diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.h b/deps/v8/test/cctest/compiler/simplified-graph-builder.h
index 537094a3b6..c1a8b9be4c 100644
--- a/deps/v8/test/cctest/compiler/simplified-graph-builder.h
+++ b/deps/v8/test/cctest/compiler/simplified-graph-builder.h
@@ -18,13 +18,13 @@ namespace compiler {
class SimplifiedGraphBuilder : public GraphBuilder {
public:
- SimplifiedGraphBuilder(Graph* graph, CommonOperatorBuilder* common,
+ SimplifiedGraphBuilder(Isolate* isolate, Graph* graph,
+ CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
SimplifiedOperatorBuilder* simplified);
virtual ~SimplifiedGraphBuilder() {}
Zone* zone() const { return graph()->zone(); }
- Isolate* isolate() const { return zone()->isolate(); }
CommonOperatorBuilder* common() const { return common_; }
MachineOperatorBuilder* machine() const { return machine_; }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index 703fc176ad..fa4da9a736 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -24,7 +24,7 @@ class BasicBlockProfilerTest : public RawMachineAssemblerTester<int32_t> {
void ResetCounts() { isolate()->basic_block_profiler()->ResetCounts(); }
void Expect(size_t size, uint32_t* expected) {
- CHECK_NE(NULL, isolate()->basic_block_profiler());
+ CHECK(isolate()->basic_block_profiler());
const BasicBlockProfiler::DataList* l =
isolate()->basic_block_profiler()->data_list();
CHECK_NE(0, static_cast<int>(l->size()));
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index cd3472d6bb..58202a61b0 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -415,7 +415,7 @@ TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_false) {
TEST(BranchCombineFloat64Compares) {
double inf = V8_INFINITY;
- double nan = v8::base::OS::nan_value();
+ double nan = std::numeric_limits<double>::quiet_NaN();
double inputs[] = {0.0, 1.0, -1.0, -inf, inf, nan};
int32_t eq_constant = -1733;
@@ -444,10 +444,10 @@ TEST(BranchCombineFloat64Compares) {
m.Bind(&blockb);
m.Return(m.Int32Constant(ne_constant));
- for (size_t i = 0; i < arraysize(inputs); i++) {
- for (size_t j = 0; j < arraysize(inputs); j += 2) {
+ for (size_t i = 0; i < arraysize(inputs); ++i) {
+ for (size_t j = 0; j < arraysize(inputs); ++j) {
input_a = inputs[i];
- input_b = inputs[i];
+ input_b = inputs[j];
int32_t expected =
invert ? (cmp.Float64Compare(input_a, input_b) ? ne_constant
: eq_constant)
diff --git a/deps/v8/test/cctest/compiler/test-changes-lowering.cc b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
index 5795754c45..d11210bb8b 100644
--- a/deps/v8/test/cctest/compiler/test-changes-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
@@ -7,7 +7,7 @@
#include "src/compiler/change-lowering.h"
#include "src/compiler/control-builders.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
@@ -33,7 +33,8 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
explicit ChangesLoweringTester(MachineType p0 = kMachNone)
: GraphBuilderTester<ReturnType>(p0),
javascript(this->zone()),
- jsgraph(this->graph(), this->common(), &javascript, this->machine()),
+ jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
+ this->machine()),
function(Handle<JSFunction>::null()) {}
JSOperatorBuilder javascript;
@@ -125,11 +126,9 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
void LowerChange(Node* change) {
// Run the graph reducer with changes lowering on a single node.
- CompilationInfo info(this->isolate(), this->zone());
- Linkage linkage(this->zone(), &info);
- Typer typer(this->graph(), info.context());
+ Typer typer(this->isolate(), this->graph(), Handle<Context>());
typer.Run();
- ChangeLowering change_lowering(&jsgraph, &linkage);
+ ChangeLowering change_lowering(&jsgraph);
SelectLowering select_lowering(this->graph(), this->common());
GraphReducer reducer(this->graph(), this->zone());
reducer.AddReducer(&change_lowering);
@@ -241,13 +240,13 @@ TEST(RunChangeTaggedToFloat64) {
{
Handle<Object> number = t.factory()->NewNumber(input);
t.Call(*number);
- CHECK_EQ(input, result);
+ CheckDoubleEq(input, result);
}
{
Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
t.Call(*number);
- CHECK_EQ(input, result);
+ CheckDoubleEq(input, result);
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-codegen-deopt.cc b/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
index 56afe7b6dd..a90e4025dc 100644
--- a/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
@@ -46,7 +46,7 @@ class DeoptCodegenTester {
function(NewFunction(src)),
info(function, scope->main_zone()),
bailout_id(-1) {
- CHECK(Parser::Parse(&info));
+ CHECK(Parser::ParseStatic(&info));
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
CHECK(Compiler::Analyze(&info));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
@@ -72,6 +72,7 @@ class DeoptCodegenTester {
}
Zone* zone() { return scope_->main_zone(); }
+ Isolate* isolate() { return scope_->main_isolate(); }
HandleAndZoneScope* scope_;
Handle<JSFunction> function;
@@ -102,7 +103,7 @@ class TrivialDeoptCodegenTester : public DeoptCodegenTester {
// }
CSignature1<Object*, Object*> sig;
- RawMachineAssembler m(graph, &sig);
+ RawMachineAssembler m(isolate(), graph, &sig);
Handle<JSFunction> deopt_function =
NewFunction("function deopt() { %DeoptimizeFunction(foo); }; deopt");
@@ -220,7 +221,7 @@ class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
// }
CSignature1<Object*, Object*> sig;
- RawMachineAssembler m(graph, &sig);
+ RawMachineAssembler m(isolate(), graph, &sig);
Unique<HeapObject> this_fun_constant =
Unique<HeapObject>::CreateUninitialized(function);
diff --git a/deps/v8/test/cctest/compiler/test-control-reducer.cc b/deps/v8/test/cctest/compiler/test-control-reducer.cc
index 03aa50b376..827dcfdaa8 100644
--- a/deps/v8/test/cctest/compiler/test-control-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-control-reducer.cc
@@ -10,13 +10,15 @@
#include "src/compiler/control-reducer.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
static const size_t kNumLeafs = 4;
+enum Decision { kFalse, kUnknown, kTrue };
+
// TODO(titzer): convert this whole file into unit tests.
static int CheckInputs(Node* node, Node* i0 = NULL, Node* i1 = NULL,
@@ -52,10 +54,8 @@ static int CheckLoop(Node* node, Node* i0 = NULL, Node* i1 = NULL,
bool IsUsedBy(Node* a, Node* b) {
- for (UseIter i = a->uses().begin(); i != a->uses().end(); ++i) {
- if (b == *i) return true;
- }
- return false;
+ auto const uses = a->uses();
+ return std::find(uses.begin(), uses.end(), b) != uses.end();
}
@@ -66,7 +66,7 @@ class ControlReducerTester : HandleAndZoneScope {
: isolate(main_isolate()),
common(main_zone()),
graph(main_zone()),
- jsgraph(&graph, &common, NULL, NULL),
+ jsgraph(main_isolate(), &graph, &common, NULL, NULL),
start(graph.NewNode(common.Start(1))),
end(graph.NewNode(common.End(), start)),
p0(graph.NewNode(common.Parameter(0), start)),
@@ -153,6 +153,12 @@ class ControlReducerTester : HandleAndZoneScope {
ReducePhiIterative(expect, phi); // iterative should give the same result.
}
+ // Checks one-step reduction of a phi.
+ void ReducePhiNonIterative(Node* expect, Node* phi) {
+ Node* result = ControlReducer::ReducePhiForTesting(&jsgraph, &common, phi);
+ CHECK_EQ(expect, result);
+ }
+
void ReducePhiIterative(Node* expect, Node* phi) {
p0->ReplaceInput(0, start); // hack: parameters may be trimmed.
Node* ret = graph.NewNode(common.Return(), phi, start, start);
@@ -164,8 +170,7 @@ class ControlReducerTester : HandleAndZoneScope {
}
void ReduceMerge(Node* expect, Node* merge) {
- Node* result =
- ControlReducer::ReduceMergeForTesting(&jsgraph, &common, merge);
+ Node* result = ControlReducer::ReduceMerge(&jsgraph, &common, merge);
CHECK_EQ(expect, result);
}
@@ -177,10 +182,25 @@ class ControlReducerTester : HandleAndZoneScope {
CheckInputs(end, expect);
}
- void ReduceBranch(Node* expect, Node* branch) {
- Node* result =
- ControlReducer::ReduceBranchForTesting(&jsgraph, &common, branch);
- CHECK_EQ(expect, result);
+ void ReduceBranch(Decision expected, Node* branch) {
+ Node* control = branch->InputAt(1);
+ for (Node* use : branch->uses()) {
+ if (use->opcode() == IrOpcode::kIfTrue) {
+ Node* result =
+ ControlReducer::ReduceIfNodeForTesting(&jsgraph, &common, use);
+ if (expected == kTrue) CHECK_EQ(control, result);
+ if (expected == kFalse) CHECK_EQ(IrOpcode::kDead, result->opcode());
+ if (expected == kUnknown) CHECK_EQ(use, result);
+ } else if (use->opcode() == IrOpcode::kIfFalse) {
+ Node* result =
+ ControlReducer::ReduceIfNodeForTesting(&jsgraph, &common, use);
+ if (expected == kFalse) CHECK_EQ(control, result);
+ if (expected == kTrue) CHECK_EQ(IrOpcode::kDead, result->opcode());
+ if (expected == kUnknown) CHECK_EQ(use, result);
+ } else {
+ UNREACHABLE();
+ }
+ }
}
Node* Return(Node* val, Node* effect, Node* control) {
@@ -206,7 +226,7 @@ TEST(Trim1_dead) {
CHECK(IsUsedBy(T.start, T.p0));
T.Trim();
CHECK(!IsUsedBy(T.start, T.p0));
- CHECK_EQ(NULL, T.p0->InputAt(0));
+ CHECK(!T.p0->InputAt(0));
}
@@ -237,9 +257,9 @@ TEST(Trim2_dead) {
CHECK(!IsUsedBy(T.one, phi));
CHECK(!IsUsedBy(T.half, phi));
CHECK(!IsUsedBy(T.start, phi));
- CHECK_EQ(NULL, phi->InputAt(0));
- CHECK_EQ(NULL, phi->InputAt(1));
- CHECK_EQ(NULL, phi->InputAt(2));
+ CHECK(!phi->InputAt(0));
+ CHECK(!phi->InputAt(1));
+ CHECK(!phi->InputAt(2));
}
@@ -259,7 +279,7 @@ TEST(Trim_chain1) {
T.Trim();
for (int i = 0; i < kDepth; i++) {
CHECK(!IsUsedBy(live[i], dead[i]));
- CHECK_EQ(NULL, dead[i]->InputAt(0));
+ CHECK(!dead[i]->InputAt(0));
CHECK_EQ(i == 0 ? T.start : live[i - 1], live[i]->InputAt(0));
}
}
@@ -339,9 +359,9 @@ TEST(Trim_cycle2) {
CHECK(!IsUsedBy(loop, phi));
CHECK(!IsUsedBy(T.one, phi));
CHECK(!IsUsedBy(T.half, phi));
- CHECK_EQ(NULL, phi->InputAt(0));
- CHECK_EQ(NULL, phi->InputAt(1));
- CHECK_EQ(NULL, phi->InputAt(2));
+ CHECK(!phi->InputAt(0));
+ CHECK(!phi->InputAt(1));
+ CHECK(!phi->InputAt(2));
}
@@ -350,8 +370,8 @@ void CheckTrimConstant(ControlReducerTester* T, Node* k) {
CHECK(IsUsedBy(k, phi));
T->Trim();
CHECK(!IsUsedBy(k, phi));
- CHECK_EQ(NULL, phi->InputAt(0));
- CHECK_EQ(NULL, phi->InputAt(1));
+ CHECK(!phi->InputAt(0));
+ CHECK(!phi->InputAt(1));
}
@@ -470,10 +490,10 @@ TEST(CReducePhi2_dead) {
for (size_t i = 1; i < kNumLeafs; i++) {
Node* a = R.leaf[i], *b = R.leaf[0];
Node* phi1 = R.Phi(b, a, R.dead);
- R.ReducePhi(phi1, phi1);
+ R.ReducePhiNonIterative(phi1, phi1);
Node* phi2 = R.Phi(a, b, R.dead);
- R.ReducePhi(phi2, phi2);
+ R.ReducePhiNonIterative(phi2, phi2);
}
}
@@ -859,8 +879,7 @@ TEST(CMergeReduce_exhaustive_4) {
if (!selector.is_selected(i)) merge->ReplaceInput(i, R.dead);
}
- Node* result =
- ControlReducer::ReduceMergeForTesting(&R.jsgraph, &R.common, merge);
+ Node* result = ControlReducer::ReduceMerge(&R.jsgraph, &R.common, merge);
int count = selector.count;
if (count == 0) {
@@ -939,7 +958,7 @@ TEST(CMergeReduce_dead_chain1) {
R.graph.SetEnd(end);
R.ReduceGraph();
CHECK(merge->IsDead());
- CHECK_EQ(NULL, end->InputAt(0)); // end dies.
+ CHECK(!end->InputAt(0)); // end dies.
}
}
@@ -1030,7 +1049,7 @@ struct While {
TEST(CBranchReduce_none1) {
ControlReducerTester R;
Diamond d(R, R.p0);
- R.ReduceBranch(d.branch, d.branch);
+ R.ReduceBranch(kUnknown, d.branch);
}
@@ -1039,7 +1058,7 @@ TEST(CBranchReduce_none2) {
Diamond d1(R, R.p0);
Diamond d2(R, R.p0);
d2.chain(d1);
- R.ReduceBranch(d2.branch, d2.branch);
+ R.ReduceBranch(kUnknown, d2.branch);
}
@@ -1052,13 +1071,7 @@ TEST(CBranchReduce_true) {
for (size_t i = 0; i < arraysize(true_values); i++) {
Diamond d(R, true_values[i]);
- Node* true_use = R.graph.NewNode(R.common.Merge(1), d.if_true);
- Node* false_use = R.graph.NewNode(R.common.Merge(1), d.if_false);
- R.ReduceBranch(R.start, d.branch);
- CHECK_EQ(R.start, true_use->InputAt(0));
- CHECK_EQ(IrOpcode::kDead, false_use->InputAt(0)->opcode());
- CHECK(d.if_true->IsDead()); // replaced
- CHECK(d.if_false->IsDead()); // replaced
+ R.ReduceBranch(kTrue, d.branch);
}
}
@@ -1070,13 +1083,7 @@ TEST(CBranchReduce_false) {
for (size_t i = 0; i < arraysize(false_values); i++) {
Diamond d(R, false_values[i]);
- Node* true_use = R.graph.NewNode(R.common.Merge(1), d.if_true);
- Node* false_use = R.graph.NewNode(R.common.Merge(1), d.if_false);
- R.ReduceBranch(R.start, d.branch);
- CHECK_EQ(R.start, false_use->InputAt(0));
- CHECK_EQ(IrOpcode::kDead, true_use->InputAt(0)->opcode());
- CHECK(d.if_true->IsDead()); // replaced
- CHECK(d.if_false->IsDead()); // replaced
+ R.ReduceBranch(kFalse, d.branch);
}
}
@@ -1211,172 +1218,6 @@ TEST(CDeadLoop2) {
}
-TEST(CNonTermLoop1) {
- ControlReducerTester R;
- Node* loop =
- R.SetSelfReferences(R.graph.NewNode(R.common.Loop(2), R.start, R.self));
- R.ReduceGraph();
- Node* end = R.graph.end();
- CheckLoop(loop, R.start, loop);
- Node* merge = end->InputAt(0);
- CheckMerge(merge, R.start, loop);
-}
-
-
-TEST(CNonTermLoop2) {
- ControlReducerTester R;
- Diamond d(R, R.p0);
- Node* loop = R.SetSelfReferences(
- R.graph.NewNode(R.common.Loop(2), d.if_false, R.self));
- d.merge->ReplaceInput(1, R.dead);
- Node* end = R.graph.end();
- end->ReplaceInput(0, d.merge);
- R.ReduceGraph();
- CHECK_EQ(end, R.graph.end());
- CheckLoop(loop, d.if_false, loop);
- Node* merge = end->InputAt(0);
- CheckMerge(merge, d.if_true, loop);
-}
-
-
-TEST(NonTermLoop3) {
- ControlReducerTester R;
- Node* loop = R.graph.NewNode(R.common.Loop(2), R.start, R.start);
- Branch b(R, R.one, loop);
- loop->ReplaceInput(1, b.if_true);
- Node* end = R.graph.end();
- end->ReplaceInput(0, b.if_false);
-
- R.ReduceGraph();
-
- CHECK_EQ(end, R.graph.end());
- CheckInputs(end, loop);
- CheckInputs(loop, R.start, loop);
-}
-
-
-TEST(CNonTermLoop_terminate1) {
- ControlReducerTester R;
- Node* loop = R.graph.NewNode(R.common.Loop(2), R.start, R.start);
- Node* effect = R.SetSelfReferences(
- R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
- Branch b(R, R.one, loop);
- loop->ReplaceInput(1, b.if_true);
- Node* end = R.graph.end();
- end->ReplaceInput(0, b.if_false);
-
- R.ReduceGraph();
-
- CHECK_EQ(end, R.graph.end());
- CheckLoop(loop, R.start, loop);
- Node* terminate = end->InputAt(0);
- CHECK_EQ(IrOpcode::kTerminate, terminate->opcode());
- CHECK_EQ(2, terminate->InputCount());
- CHECK_EQ(1, terminate->op()->EffectInputCount());
- CHECK_EQ(1, terminate->op()->ControlInputCount());
- CheckInputs(terminate, effect, loop);
-}
-
-
-TEST(CNonTermLoop_terminate2) {
- ControlReducerTester R;
- Node* loop = R.graph.NewNode(R.common.Loop(2), R.start, R.start);
- Node* effect1 = R.SetSelfReferences(
- R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
- Node* effect2 = R.SetSelfReferences(
- R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
- Branch b(R, R.one, loop);
- loop->ReplaceInput(1, b.if_true);
- Node* end = R.graph.end();
- end->ReplaceInput(0, b.if_false);
-
- R.ReduceGraph();
-
- CheckLoop(loop, R.start, loop);
- CHECK_EQ(end, R.graph.end());
- Node* terminate = end->InputAt(0);
- CHECK_EQ(IrOpcode::kTerminate, terminate->opcode());
- CHECK_EQ(3, terminate->InputCount());
- CHECK_EQ(2, terminate->op()->EffectInputCount());
- CHECK_EQ(1, terminate->op()->ControlInputCount());
- Node* e0 = terminate->InputAt(0);
- Node* e1 = terminate->InputAt(1);
- CHECK(e0 == effect1 || e1 == effect1);
- CHECK(e0 == effect2 || e1 == effect2);
- CHECK_EQ(loop, terminate->InputAt(2));
-}
-
-
-TEST(CNonTermLoop_terminate_m1) {
- ControlReducerTester R;
- Node* loop =
- R.SetSelfReferences(R.graph.NewNode(R.common.Loop(2), R.start, R.self));
- Node* effect = R.SetSelfReferences(
- R.graph.NewNode(R.common.EffectPhi(2), R.start, R.self, loop));
- R.ReduceGraph();
- Node* end = R.graph.end();
- CHECK_EQ(R.start, loop->InputAt(0));
- CHECK_EQ(loop, loop->InputAt(1));
- Node* merge = end->InputAt(0);
- CHECK_EQ(IrOpcode::kMerge, merge->opcode());
- CHECK_EQ(2, merge->InputCount());
- CHECK_EQ(2, merge->op()->ControlInputCount());
- CHECK_EQ(R.start, merge->InputAt(0));
-
- Node* terminate = merge->InputAt(1);
- CHECK_EQ(IrOpcode::kTerminate, terminate->opcode());
- CHECK_EQ(2, terminate->InputCount());
- CHECK_EQ(1, terminate->op()->EffectInputCount());
- CHECK_EQ(1, terminate->op()->ControlInputCount());
- CHECK_EQ(effect, terminate->InputAt(0));
- CHECK_EQ(loop, terminate->InputAt(1));
-}
-
-
-TEST(CNonTermLoop_big1) {
- ControlReducerTester R;
- Branch b1(R, R.p0);
- Node* rt = R.graph.NewNode(R.common.Return(), R.one, R.start, b1.if_true);
-
- Branch b2(R, R.p0, b1.if_false);
- Node* rf = R.graph.NewNode(R.common.Return(), R.zero, R.start, b2.if_true);
- Node* loop = R.SetSelfReferences(
- R.graph.NewNode(R.common.Loop(2), b2.if_false, R.self));
- Node* merge = R.graph.NewNode(R.common.Merge(2), rt, rf);
- R.end->ReplaceInput(0, merge);
-
- R.ReduceGraph();
-
- CheckInputs(R.end, merge);
- CheckInputs(merge, rt, rf, loop);
- CheckInputs(loop, b2.if_false, loop);
-}
-
-
-TEST(CNonTermLoop_big2) {
- ControlReducerTester R;
- Branch b1(R, R.p0);
- Node* rt = R.graph.NewNode(R.common.Return(), R.one, R.start, b1.if_true);
-
- Branch b2(R, R.zero, b1.if_false);
- Node* rf = R.graph.NewNode(R.common.Return(), R.zero, R.start, b2.if_true);
- Node* loop = R.SetSelfReferences(
- R.graph.NewNode(R.common.Loop(2), b2.if_false, R.self));
- Node* merge = R.graph.NewNode(R.common.Merge(2), rt, rf);
- R.end->ReplaceInput(0, merge);
-
- R.ReduceGraph();
-
- Node* new_merge = R.end->InputAt(0); // old merge was reduced.
- CHECK_NE(merge, new_merge);
- CheckInputs(new_merge, rt, loop);
- CheckInputs(loop, b1.if_false, loop);
- CHECK(merge->IsDead());
- CHECK(rf->IsDead());
- CHECK(b2.if_true->IsDead());
-}
-
-
TEST(Return1) {
ControlReducerTester R;
Node* ret = R.Return(R.one, R.start, R.start);
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index ea6f4ee830..818a0bd238 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -137,15 +137,15 @@ class ParallelMoveCreator : public HandleAndZoneScope {
int index = rng_->NextInt(6);
switch (rng_->NextInt(5)) {
case 0:
- return ConstantOperand::Create(index, main_zone());
+ return ConstantOperand::New(index, main_zone());
case 1:
- return StackSlotOperand::Create(index, main_zone());
+ return StackSlotOperand::New(index, main_zone());
case 2:
- return DoubleStackSlotOperand::Create(index, main_zone());
+ return DoubleStackSlotOperand::New(index, main_zone());
case 3:
- return RegisterOperand::Create(index, main_zone());
+ return RegisterOperand::New(index, main_zone());
case 4:
- return DoubleRegisterOperand::Create(index, main_zone());
+ return DoubleRegisterOperand::New(index, main_zone());
}
UNREACHABLE();
return NULL;
diff --git a/deps/v8/test/cctest/compiler/test-graph-reducer.cc b/deps/v8/test/cctest/compiler/test-graph-reducer.cc
deleted file mode 100644
index 70b57b9da9..0000000000
--- a/deps/v8/test/cctest/compiler/test-graph-reducer.cc
+++ /dev/null
@@ -1,622 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "graph-tester.h"
-#include "src/compiler/graph-reducer.h"
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-const uint8_t OPCODE_A0 = 10;
-const uint8_t OPCODE_A1 = 11;
-const uint8_t OPCODE_A2 = 12;
-const uint8_t OPCODE_B0 = 20;
-const uint8_t OPCODE_B1 = 21;
-const uint8_t OPCODE_B2 = 22;
-const uint8_t OPCODE_C0 = 30;
-const uint8_t OPCODE_C1 = 31;
-const uint8_t OPCODE_C2 = 32;
-
-static Operator OPA0(OPCODE_A0, Operator::kNoWrite, "opa0", 0, 0, 0, 0, 0, 0);
-static Operator OPA1(OPCODE_A1, Operator::kNoWrite, "opa1", 1, 0, 0, 0, 0, 0);
-static Operator OPA2(OPCODE_A2, Operator::kNoWrite, "opa2", 2, 0, 0, 0, 0, 0);
-static Operator OPB0(OPCODE_B0, Operator::kNoWrite, "opa0", 0, 0, 0, 0, 0, 0);
-static Operator OPB1(OPCODE_B1, Operator::kNoWrite, "opa1", 1, 0, 0, 0, 0, 0);
-static Operator OPB2(OPCODE_B2, Operator::kNoWrite, "opa2", 2, 0, 0, 0, 0, 0);
-static Operator OPC0(OPCODE_C0, Operator::kNoWrite, "opc0", 0, 0, 0, 0, 0, 0);
-static Operator OPC1(OPCODE_C1, Operator::kNoWrite, "opc1", 1, 0, 0, 0, 0, 0);
-static Operator OPC2(OPCODE_C2, Operator::kNoWrite, "opc2", 2, 0, 0, 0, 0, 0);
-
-
-// Replaces all "A" operators with "B" operators without creating new nodes.
-class InPlaceABReducer : public Reducer {
- public:
- virtual Reduction Reduce(Node* node) {
- switch (node->op()->opcode()) {
- case OPCODE_A0:
- CHECK_EQ(0, node->InputCount());
- node->set_op(&OPB0);
- return Replace(node);
- case OPCODE_A1:
- CHECK_EQ(1, node->InputCount());
- node->set_op(&OPB1);
- return Replace(node);
- case OPCODE_A2:
- CHECK_EQ(2, node->InputCount());
- node->set_op(&OPB2);
- return Replace(node);
- }
- return NoChange();
- }
-};
-
-
-// Replaces all "A" operators with "B" operators by allocating new nodes.
-class NewABReducer : public Reducer {
- public:
- explicit NewABReducer(Graph* graph) : graph_(graph) {}
- virtual Reduction Reduce(Node* node) {
- switch (node->op()->opcode()) {
- case OPCODE_A0:
- CHECK_EQ(0, node->InputCount());
- return Replace(graph_->NewNode(&OPB0));
- case OPCODE_A1:
- CHECK_EQ(1, node->InputCount());
- return Replace(graph_->NewNode(&OPB1, node->InputAt(0)));
- case OPCODE_A2:
- CHECK_EQ(2, node->InputCount());
- return Replace(
- graph_->NewNode(&OPB2, node->InputAt(0), node->InputAt(1)));
- }
- return NoChange();
- }
- Graph* graph_;
-};
-
-
-// Replaces all "B" operators with "C" operators without creating new nodes.
-class InPlaceBCReducer : public Reducer {
- public:
- virtual Reduction Reduce(Node* node) {
- switch (node->op()->opcode()) {
- case OPCODE_B0:
- CHECK_EQ(0, node->InputCount());
- node->set_op(&OPC0);
- return Replace(node);
- case OPCODE_B1:
- CHECK_EQ(1, node->InputCount());
- node->set_op(&OPC1);
- return Replace(node);
- case OPCODE_B2:
- CHECK_EQ(2, node->InputCount());
- node->set_op(&OPC2);
- return Replace(node);
- }
- return NoChange();
- }
-};
-
-
-// Wraps all "OPA0" nodes in "OPB1" operators by allocating new nodes.
-class A0Wrapper FINAL : public Reducer {
- public:
- explicit A0Wrapper(Graph* graph) : graph_(graph) {}
- virtual Reduction Reduce(Node* node) OVERRIDE {
- switch (node->op()->opcode()) {
- case OPCODE_A0:
- CHECK_EQ(0, node->InputCount());
- return Replace(graph_->NewNode(&OPB1, node));
- }
- return NoChange();
- }
- Graph* graph_;
-};
-
-
-// Wraps all "OPB0" nodes in two "OPC1" operators by allocating new nodes.
-class B0Wrapper FINAL : public Reducer {
- public:
- explicit B0Wrapper(Graph* graph) : graph_(graph) {}
- virtual Reduction Reduce(Node* node) OVERRIDE {
- switch (node->op()->opcode()) {
- case OPCODE_B0:
- CHECK_EQ(0, node->InputCount());
- return Replace(graph_->NewNode(&OPC1, graph_->NewNode(&OPC1, node)));
- }
- return NoChange();
- }
- Graph* graph_;
-};
-
-
-// Replaces all "OPA1" nodes with the first input.
-class A1Forwarder : public Reducer {
- virtual Reduction Reduce(Node* node) {
- switch (node->op()->opcode()) {
- case OPCODE_A1:
- CHECK_EQ(1, node->InputCount());
- return Replace(node->InputAt(0));
- }
- return NoChange();
- }
-};
-
-
-// Replaces all "OPB1" nodes with the first input.
-class B1Forwarder : public Reducer {
- virtual Reduction Reduce(Node* node) {
- switch (node->op()->opcode()) {
- case OPCODE_B1:
- CHECK_EQ(1, node->InputCount());
- return Replace(node->InputAt(0));
- }
- return NoChange();
- }
-};
-
-
-// Swaps the inputs to "OP2A" and "OP2B" nodes based on ids.
-class AB2Sorter : public Reducer {
- virtual Reduction Reduce(Node* node) {
- switch (node->op()->opcode()) {
- case OPCODE_A2:
- case OPCODE_B2:
- CHECK_EQ(2, node->InputCount());
- Node* x = node->InputAt(0);
- Node* y = node->InputAt(1);
- if (x->id() > y->id()) {
- node->ReplaceInput(0, y);
- node->ReplaceInput(1, x);
- return Replace(node);
- }
- }
- return NoChange();
- }
-};
-
-
-// Simply records the nodes visited.
-class ReducerRecorder : public Reducer {
- public:
- explicit ReducerRecorder(Zone* zone)
- : set(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
- virtual Reduction Reduce(Node* node) {
- set.insert(node);
- return NoChange();
- }
- void CheckContains(Node* node) {
- CHECK_EQ(1, static_cast<int>(set.count(node)));
- }
- NodeSet set;
-};
-
-
-TEST(ReduceGraphFromEnd1) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* end = graph.NewNode(&OPA1, n1);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- ReducerRecorder recorder(graph.zone());
- reducer.AddReducer(&recorder);
- reducer.ReduceGraph();
- recorder.CheckContains(n1);
- recorder.CheckContains(end);
-}
-
-
-TEST(ReduceGraphFromEnd2) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* n2 = graph.NewNode(&OPA1, n1);
- Node* n3 = graph.NewNode(&OPA1, n1);
- Node* end = graph.NewNode(&OPA2, n2, n3);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- ReducerRecorder recorder(graph.zone());
- reducer.AddReducer(&recorder);
- reducer.ReduceGraph();
- recorder.CheckContains(n1);
- recorder.CheckContains(n2);
- recorder.CheckContains(n3);
- recorder.CheckContains(end);
-}
-
-
-TEST(ReduceInPlace1) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* end = graph.NewNode(&OPA1, n1);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- InPlaceABReducer r;
- reducer.AddReducer(&r);
-
- // Tests A* => B* with in-place updates.
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPB0, n1->op());
- CHECK_EQ(&OPB1, end->op());
- CHECK_EQ(n1, end->InputAt(0));
- }
-}
-
-
-TEST(ReduceInPlace2) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* n2 = graph.NewNode(&OPA1, n1);
- Node* n3 = graph.NewNode(&OPA1, n1);
- Node* end = graph.NewNode(&OPA2, n2, n3);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- InPlaceABReducer r;
- reducer.AddReducer(&r);
-
- // Tests A* => B* with in-place updates.
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPB0, n1->op());
- CHECK_EQ(&OPB1, n2->op());
- CHECK_EQ(n1, n2->InputAt(0));
- CHECK_EQ(&OPB1, n3->op());
- CHECK_EQ(n1, n3->InputAt(0));
- CHECK_EQ(&OPB2, end->op());
- CHECK_EQ(n2, end->InputAt(0));
- CHECK_EQ(n3, end->InputAt(1));
- }
-}
-
-
-TEST(ReduceNew1) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* n2 = graph.NewNode(&OPA1, n1);
- Node* n3 = graph.NewNode(&OPA1, n1);
- Node* end = graph.NewNode(&OPA2, n2, n3);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- NewABReducer r(&graph);
- reducer.AddReducer(&r);
-
- // Tests A* => B* while creating new nodes.
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- if (i == 0) {
- CHECK_NE(before, graph.NodeCount());
- } else {
- CHECK_EQ(before, graph.NodeCount());
- }
- Node* nend = graph.end();
- CHECK_NE(end, nend); // end() should be updated too.
-
- Node* nn2 = nend->InputAt(0);
- Node* nn3 = nend->InputAt(1);
- Node* nn1 = nn2->InputAt(0);
-
- CHECK_EQ(nn1, nn3->InputAt(0));
-
- CHECK_EQ(&OPB0, nn1->op());
- CHECK_EQ(&OPB1, nn2->op());
- CHECK_EQ(&OPB1, nn3->op());
- CHECK_EQ(&OPB2, nend->op());
- }
-}
-
-
-TEST(Wrapping1) {
- GraphTester graph;
-
- Node* end = graph.NewNode(&OPA0);
- graph.SetEnd(end);
- CHECK_EQ(1, graph.NodeCount());
-
- GraphReducer reducer(&graph, graph.zone());
- A0Wrapper r(&graph);
- reducer.AddReducer(&r);
-
- reducer.ReduceGraph();
- CHECK_EQ(2, graph.NodeCount());
-
- Node* nend = graph.end();
- CHECK_NE(end, nend);
- CHECK_EQ(&OPB1, nend->op());
- CHECK_EQ(1, nend->InputCount());
- CHECK_EQ(end, nend->InputAt(0));
-}
-
-
-TEST(Wrapping2) {
- GraphTester graph;
-
- Node* end = graph.NewNode(&OPB0);
- graph.SetEnd(end);
- CHECK_EQ(1, graph.NodeCount());
-
- GraphReducer reducer(&graph, graph.zone());
- B0Wrapper r(&graph);
- reducer.AddReducer(&r);
-
- reducer.ReduceGraph();
- CHECK_EQ(3, graph.NodeCount());
-
- Node* nend = graph.end();
- CHECK_NE(end, nend);
- CHECK_EQ(&OPC1, nend->op());
- CHECK_EQ(1, nend->InputCount());
-
- Node* n1 = nend->InputAt(0);
- CHECK_NE(end, n1);
- CHECK_EQ(&OPC1, n1->op());
- CHECK_EQ(1, n1->InputCount());
- CHECK_EQ(end, n1->InputAt(0));
-}
-
-
-TEST(Forwarding1) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* end = graph.NewNode(&OPA1, n1);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- A1Forwarder r;
- reducer.AddReducer(&r);
-
- // Tests A1(x) => x
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPA0, n1->op());
- CHECK_EQ(n1, graph.end());
- }
-}
-
-
-TEST(Forwarding2) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* n2 = graph.NewNode(&OPA1, n1);
- Node* n3 = graph.NewNode(&OPA1, n1);
- Node* end = graph.NewNode(&OPA2, n2, n3);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- A1Forwarder r;
- reducer.AddReducer(&r);
-
- // Tests reducing A2(A1(x), A1(y)) => A2(x, y).
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPA0, n1->op());
- CHECK_EQ(n1, end->InputAt(0));
- CHECK_EQ(n1, end->InputAt(1));
- CHECK_EQ(&OPA2, end->op());
- CHECK_EQ(0, n2->UseCount());
- CHECK_EQ(0, n3->UseCount());
- }
-}
-
-
-TEST(Forwarding3) {
- // Tests reducing a chain of A1(A1(A1(A1(x)))) => x.
- for (int i = 0; i < 8; i++) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* end = n1;
- for (int j = 0; j < i; j++) {
- end = graph.NewNode(&OPA1, end);
- }
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- A1Forwarder r;
- reducer.AddReducer(&r);
-
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPA0, n1->op());
- CHECK_EQ(n1, graph.end());
- }
- }
-}
-
-
-TEST(ReduceForward1) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* n2 = graph.NewNode(&OPA1, n1);
- Node* n3 = graph.NewNode(&OPA1, n1);
- Node* end = graph.NewNode(&OPA2, n2, n3);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- InPlaceABReducer r;
- B1Forwarder f;
- reducer.AddReducer(&r);
- reducer.AddReducer(&f);
-
- // Tests first reducing A => B, then B1(x) => x.
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPB0, n1->op());
- CHECK(n2->IsDead());
- CHECK_EQ(n1, end->InputAt(0));
- CHECK(n3->IsDead());
- CHECK_EQ(n1, end->InputAt(0));
- CHECK_EQ(&OPB2, end->op());
- CHECK_EQ(0, n2->UseCount());
- CHECK_EQ(0, n3->UseCount());
- }
-}
-
-
-TEST(Sorter1) {
- HandleAndZoneScope scope;
- AB2Sorter r;
- for (int i = 0; i < 6; i++) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* n2 = graph.NewNode(&OPA1, n1);
- Node* n3 = graph.NewNode(&OPA1, n1);
- Node* end = NULL; // Initialize to please the compiler.
-
- if (i == 0) end = graph.NewNode(&OPA2, n2, n3);
- if (i == 1) end = graph.NewNode(&OPA2, n3, n2);
- if (i == 2) end = graph.NewNode(&OPA2, n2, n1);
- if (i == 3) end = graph.NewNode(&OPA2, n1, n2);
- if (i == 4) end = graph.NewNode(&OPA2, n3, n1);
- if (i == 5) end = graph.NewNode(&OPA2, n1, n3);
-
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- reducer.AddReducer(&r);
-
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPA0, n1->op());
- CHECK_EQ(&OPA1, n2->op());
- CHECK_EQ(&OPA1, n3->op());
- CHECK_EQ(&OPA2, end->op());
- CHECK_EQ(end, graph.end());
- CHECK(end->InputAt(0)->id() <= end->InputAt(1)->id());
- }
-}
-
-
-// Generate a node graph with the given permutations.
-void GenDAG(Graph* graph, int* p3, int* p2, int* p1) {
- Node* level4 = graph->NewNode(&OPA0);
- Node* level3[] = {graph->NewNode(&OPA1, level4),
- graph->NewNode(&OPA1, level4)};
-
- Node* level2[] = {graph->NewNode(&OPA1, level3[p3[0]]),
- graph->NewNode(&OPA1, level3[p3[1]]),
- graph->NewNode(&OPA1, level3[p3[0]]),
- graph->NewNode(&OPA1, level3[p3[1]])};
-
- Node* level1[] = {graph->NewNode(&OPA2, level2[p2[0]], level2[p2[1]]),
- graph->NewNode(&OPA2, level2[p2[2]], level2[p2[3]])};
-
- Node* end = graph->NewNode(&OPA2, level1[p1[0]], level1[p1[1]]);
- graph->SetEnd(end);
-}
-
-
-TEST(SortForwardReduce) {
- GraphTester graph;
-
- // Tests combined reductions on a series of DAGs.
- for (int j = 0; j < 2; j++) {
- int p3[] = {j, 1 - j};
- for (int m = 0; m < 2; m++) {
- int p1[] = {m, 1 - m};
- for (int k = 0; k < 24; k++) { // All permutations of 0, 1, 2, 3
- int p2[] = {-1, -1, -1, -1};
- int n = k;
- for (int d = 4; d >= 1; d--) { // Construct permutation.
- int p = n % d;
- for (int z = 0; z < 4; z++) {
- if (p2[z] == -1) {
- if (p == 0) p2[z] = d - 1;
- p--;
- }
- }
- n = n / d;
- }
-
- GenDAG(&graph, p3, p2, p1);
-
- GraphReducer reducer(&graph, graph.zone());
- AB2Sorter r1;
- A1Forwarder r2;
- InPlaceABReducer r3;
- reducer.AddReducer(&r1);
- reducer.AddReducer(&r2);
- reducer.AddReducer(&r3);
-
- reducer.ReduceGraph();
-
- Node* end = graph.end();
- CHECK_EQ(&OPB2, end->op());
- Node* n1 = end->InputAt(0);
- Node* n2 = end->InputAt(1);
- CHECK_NE(n1, n2);
- CHECK(n1->id() < n2->id());
- CHECK_EQ(&OPB2, n1->op());
- CHECK_EQ(&OPB2, n2->op());
- Node* n4 = n1->InputAt(0);
- CHECK_EQ(&OPB0, n4->op());
- CHECK_EQ(n4, n1->InputAt(1));
- CHECK_EQ(n4, n2->InputAt(0));
- CHECK_EQ(n4, n2->InputAt(1));
- }
- }
- }
-}
-
-
-TEST(Order) {
- // Test that the order of reducers doesn't matter, as they should be
- // rerun for changed nodes.
- for (int i = 0; i < 2; i++) {
- GraphTester graph;
-
- Node* n1 = graph.NewNode(&OPA0);
- Node* end = graph.NewNode(&OPA1, n1);
- graph.SetEnd(end);
-
- GraphReducer reducer(&graph, graph.zone());
- InPlaceABReducer abr;
- InPlaceBCReducer bcr;
- if (i == 0) {
- reducer.AddReducer(&abr);
- reducer.AddReducer(&bcr);
- } else {
- reducer.AddReducer(&bcr);
- reducer.AddReducer(&abr);
- }
-
- // Tests A* => C* with in-place updates.
- for (int i = 0; i < 3; i++) {
- int before = graph.NodeCount();
- reducer.ReduceGraph();
- CHECK_EQ(before, graph.NodeCount());
- CHECK_EQ(&OPC0, n1->op());
- CHECK_EQ(&OPC1, end->op());
- CHECK_EQ(n1, end->InputAt(0));
- }
- }
-}
diff --git a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
index ce3e6b71e9..702e99db15 100644
--- a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
+++ b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
@@ -14,6 +14,7 @@
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
+#include "src/compiler/source-position.h"
#include "src/compiler/verifier.h"
using namespace v8::internal;
@@ -33,7 +34,8 @@ TEST(NodeWithNullInputReachableFromEnd) {
OFStream os(stdout);
os << AsDOT(graph);
- os << AsJSON(graph);
+ SourcePositionTable table(&graph);
+ os << AsJSON(graph, &table);
}
@@ -51,7 +53,8 @@ TEST(NodeWithNullControlReachableFromEnd) {
OFStream os(stdout);
os << AsDOT(graph);
- os << AsJSON(graph);
+ SourcePositionTable table(&graph);
+ os << AsJSON(graph, &table);
}
@@ -69,7 +72,8 @@ TEST(NodeWithNullInputReachableFromStart) {
OFStream os(stdout);
os << AsDOT(graph);
- os << AsJSON(graph);
+ SourcePositionTable table(&graph);
+ os << AsJSON(graph, &table);
}
@@ -86,5 +90,6 @@ TEST(NodeWithNullControlReachableFromStart) {
OFStream os(stdout);
os << AsDOT(graph);
- os << AsJSON(graph);
+ SourcePositionTable table(&graph);
+ os << AsJSON(graph, &table);
}
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 294812fdfb..85cc870e9d 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -30,8 +30,8 @@ class InstructionTester : public HandleAndZoneScope {
: isolate(main_isolate()),
graph(zone()),
schedule(zone()),
- info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
- linkage(zone(), &info),
+ fake_stub(main_isolate()),
+ info(&fake_stub, main_isolate()),
common(zone()),
machine(zone()),
code(NULL) {}
@@ -39,8 +39,8 @@ class InstructionTester : public HandleAndZoneScope {
Isolate* isolate;
Graph graph;
Schedule schedule;
+ FakeStubForTesting fake_stub;
CompilationInfoWithZone info;
- Linkage linkage;
CommonOperatorBuilder common;
MachineOperatorBuilder machine;
TestInstrSeq* code;
@@ -55,7 +55,8 @@ class InstructionTester : public HandleAndZoneScope {
}
InstructionBlocks* instruction_blocks =
TestInstrSeq::InstructionBlocksFor(main_zone(), &schedule);
- code = new (main_zone()) TestInstrSeq(main_zone(), instruction_blocks);
+ code = new (main_zone())
+ TestInstrSeq(main_isolate(), main_zone(), instruction_blocks);
}
Node* Int32Constant(int32_t val) {
@@ -89,10 +90,7 @@ class InstructionTester : public HandleAndZoneScope {
}
UnallocatedOperand* NewUnallocated(int vreg) {
- UnallocatedOperand* unallocated =
- new (zone()) UnallocatedOperand(UnallocatedOperand::ANY);
- unallocated->set_virtual_register(vreg);
- return unallocated;
+ return UnallocatedOperand(UnallocatedOperand::ANY, vreg).Copy(zone());
}
InstructionBlock* BlockAt(BasicBlock* block) {
@@ -131,13 +129,10 @@ TEST(InstructionBasic) {
BasicBlockVector* blocks = R.schedule.rpo_order();
CHECK_EQ(static_cast<int>(blocks->size()), R.code->InstructionBlockCount());
- int index = 0;
- for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end();
- i++, index++) {
- BasicBlock* block = *i;
+ for (auto block : *blocks) {
CHECK_EQ(block->rpo_number(), R.BlockAt(block)->rpo_number().ToInt());
CHECK_EQ(block->id().ToInt(), R.BlockAt(block)->id().ToInt());
- CHECK_EQ(NULL, block->loop_end());
+ CHECK(!block->loop_end());
}
}
@@ -214,14 +209,10 @@ TEST(InstructionIsGapAt) {
R.code->AddInstruction(g);
R.code->EndBlock(b0->GetRpoNumber());
- CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
-
- CHECK_EQ(true, R.code->IsGapAt(0)); // Label
- CHECK_EQ(true, R.code->IsGapAt(1)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(2)); // i0
- CHECK_EQ(true, R.code->IsGapAt(3)); // Gap
- CHECK_EQ(true, R.code->IsGapAt(4)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(5)); // g
+ CHECK(R.code->instructions().size() == 4);
+ for (size_t i = 0; i < R.code->instructions().size(); ++i) {
+ CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
+ }
}
@@ -248,23 +239,10 @@ TEST(InstructionIsGapAt2) {
R.code->AddInstruction(g1);
R.code->EndBlock(b1->GetRpoNumber());
- CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
-
- CHECK_EQ(true, R.code->IsGapAt(0)); // Label
- CHECK_EQ(true, R.code->IsGapAt(1)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(2)); // i0
- CHECK_EQ(true, R.code->IsGapAt(3)); // Gap
- CHECK_EQ(true, R.code->IsGapAt(4)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(5)); // g
-
- CHECK_EQ(true, R.code->InstructionAt(6)->IsBlockStart());
-
- CHECK_EQ(true, R.code->IsGapAt(6)); // Label
- CHECK_EQ(true, R.code->IsGapAt(7)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(8)); // i1
- CHECK_EQ(true, R.code->IsGapAt(9)); // Gap
- CHECK_EQ(true, R.code->IsGapAt(10)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(11)); // g1
+ CHECK(R.code->instructions().size() == 8);
+ for (size_t i = 0; i < R.code->instructions().size(); ++i) {
+ CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
+ }
}
@@ -282,16 +260,12 @@ TEST(InstructionAddGapMove) {
R.code->AddInstruction(g);
R.code->EndBlock(b0->GetRpoNumber());
- CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
-
- CHECK_EQ(true, R.code->IsGapAt(0)); // Label
- CHECK_EQ(true, R.code->IsGapAt(1)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(2)); // i0
- CHECK_EQ(true, R.code->IsGapAt(3)); // Gap
- CHECK_EQ(true, R.code->IsGapAt(4)); // Gap
- CHECK_EQ(false, R.code->IsGapAt(5)); // g
+ CHECK(R.code->instructions().size() == 4);
+ for (size_t i = 0; i < R.code->instructions().size(); ++i) {
+ CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
+ }
- int indexes[] = {0, 1, 3, 4, -1};
+ int indexes[] = {0, 2, -1};
for (int i = 0; indexes[i] >= 0; i++) {
int index = indexes[i];
@@ -301,7 +275,7 @@ TEST(InstructionAddGapMove) {
R.code->AddGapMove(index, op1, op2);
GapInstruction* gap = R.code->GapAt(index);
ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
- CHECK_NE(NULL, move);
+ CHECK(move);
const ZoneList<MoveOperands>* move_operands = move->move_operands();
CHECK_EQ(1, move_operands->length());
MoveOperands* cur = &move_operands->at(0);
@@ -312,7 +286,7 @@ TEST(InstructionAddGapMove) {
TEST(InstructionOperands) {
- Zone zone(CcTest::InitIsolateOnce());
+ Zone zone;
{
TestInstr* i = TestInstr::New(&zone, 101);
@@ -321,23 +295,24 @@ TEST(InstructionOperands) {
CHECK_EQ(0, static_cast<int>(i->TempCount()));
}
- InstructionOperand* outputs[] = {
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
-
- InstructionOperand* inputs[] = {
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
-
- InstructionOperand* temps[] = {
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
- new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+ int vreg = 15;
+ InstructionOperand outputs[] = {
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg)};
+
+ InstructionOperand inputs[] = {
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg)};
+
+ InstructionOperand temps[] = {
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg),
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg)};
for (size_t i = 0; i < arraysize(outputs); i++) {
for (size_t j = 0; j < arraysize(inputs); j++) {
@@ -349,15 +324,15 @@ TEST(InstructionOperands) {
CHECK(k == m->TempCount());
for (size_t z = 0; z < i; z++) {
- CHECK_EQ(outputs[z], m->OutputAt(z));
+ CHECK(outputs[z].Equals(m->OutputAt(z)));
}
for (size_t z = 0; z < j; z++) {
- CHECK_EQ(inputs[z], m->InputAt(z));
+ CHECK(inputs[z].Equals(m->InputAt(z)));
}
for (size_t z = 0; z < k; z++) {
- CHECK_EQ(temps[z], m->TempAt(z));
+ CHECK(temps[z].Equals(m->TempAt(z)));
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 8588f66f1f..630f911c5e 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -6,7 +6,7 @@
#include "src/assembler.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/typer.h"
#include "src/types.h"
#include "test/cctest/cctest.h"
@@ -17,11 +17,11 @@ using namespace v8::internal::compiler;
class JSCacheTesterHelper {
protected:
- explicit JSCacheTesterHelper(Zone* zone)
+ JSCacheTesterHelper(Isolate* isolate, Zone* zone)
: main_graph_(zone),
main_common_(zone),
main_javascript_(zone),
- main_typer_(&main_graph_, MaybeHandle<Context>()),
+ main_typer_(isolate, &main_graph_, MaybeHandle<Context>()),
main_machine_(zone) {}
Graph main_graph_;
CommonOperatorBuilder main_common_;
@@ -37,8 +37,8 @@ class JSConstantCacheTester : public HandleAndZoneScope,
public JSGraph {
public:
JSConstantCacheTester()
- : JSCacheTesterHelper(main_zone()),
- JSGraph(&main_graph_, &main_common_, &main_javascript_,
+ : JSCacheTesterHelper(main_isolate(), main_zone()),
+ JSGraph(main_isolate(), &main_graph_, &main_common_, &main_javascript_,
&main_machine_) {
main_graph_.SetStart(main_graph_.NewNode(common()->Start(0)));
main_graph_.SetEnd(main_graph_.NewNode(common()->End()));
@@ -65,7 +65,7 @@ TEST(ZeroConstant1) {
CHECK_EQ(zero, T.Constant(0));
CHECK_NE(zero, T.Constant(-0.0));
CHECK_NE(zero, T.Constant(1.0));
- CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+ CHECK_NE(zero, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(zero, T.Float64Constant(0));
CHECK_NE(zero, T.Int32Constant(0));
@@ -103,10 +103,10 @@ TEST(MinusZeroConstant) {
double zero_value = OpParameter<double>(zero);
double minus_zero_value = OpParameter<double>(minus_zero);
- CHECK_EQ(0.0, zero_value);
- CHECK_NE(-0.0, zero_value);
- CHECK_EQ(-0.0, minus_zero_value);
- CHECK_NE(0.0, minus_zero_value);
+ CHECK(bit_cast<uint64_t>(0.0) == bit_cast<uint64_t>(zero_value));
+ CHECK(bit_cast<uint64_t>(-0.0) != bit_cast<uint64_t>(zero_value));
+ CHECK(bit_cast<uint64_t>(0.0) != bit_cast<uint64_t>(minus_zero_value));
+ CHECK(bit_cast<uint64_t>(-0.0) == bit_cast<uint64_t>(minus_zero_value));
}
@@ -119,7 +119,7 @@ TEST(ZeroConstant2) {
CHECK_EQ(zero, T.ZeroConstant());
CHECK_NE(zero, T.Constant(-0.0));
CHECK_NE(zero, T.Constant(1.0));
- CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+ CHECK_NE(zero, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(zero, T.Float64Constant(0));
CHECK_NE(zero, T.Int32Constant(0));
@@ -144,7 +144,7 @@ TEST(OneConstant1) {
CHECK_EQ(one, T.Constant(1.0));
CHECK_NE(one, T.Constant(1.01));
CHECK_NE(one, T.Constant(-1.01));
- CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+ CHECK_NE(one, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(one, T.Float64Constant(1.0));
CHECK_NE(one, T.Int32Constant(1));
@@ -169,7 +169,7 @@ TEST(OneConstant2) {
CHECK_EQ(one, T.Constant(1.0));
CHECK_NE(one, T.Constant(1.01));
CHECK_NE(one, T.Constant(-1.01));
- CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+ CHECK_NE(one, T.Constant(std::numeric_limits<double>::quiet_NaN()));
CHECK_NE(one, T.Float64Constant(1.0));
CHECK_NE(one, T.Int32Constant(1));
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index fb7bd946f2..e2cb8e6ac9 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -5,7 +5,7 @@
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/source-position.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
@@ -18,20 +18,19 @@ class ContextSpecializationTester : public HandleAndZoneScope,
public DirectGraphBuilder {
public:
ContextSpecializationTester()
- : DirectGraphBuilder(new (main_zone()) Graph(main_zone())),
+ : DirectGraphBuilder(main_isolate(),
+ new (main_zone()) Graph(main_zone())),
common_(main_zone()),
javascript_(main_zone()),
machine_(main_zone()),
simplified_(main_zone()),
- jsgraph_(graph(), common(), &javascript_, &machine_),
- info_(main_isolate(), main_zone()) {}
+ jsgraph_(main_isolate(), graph(), common(), &javascript_, &machine_) {}
Factory* factory() { return main_isolate()->factory(); }
CommonOperatorBuilder* common() { return &common_; }
JSOperatorBuilder* javascript() { return &javascript_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
JSGraph* jsgraph() { return &jsgraph_; }
- CompilationInfo* info() { return &info_; }
private:
CommonOperatorBuilder common_;
@@ -39,7 +38,6 @@ class ContextSpecializationTester : public HandleAndZoneScope,
MachineOperatorBuilder machine_;
SimplifiedOperatorBuilder simplified_;
JSGraph jsgraph_;
- CompilationInfo info_;
};
@@ -62,7 +60,7 @@ TEST(ReduceJSLoadContext) {
Node* const_context = t.jsgraph()->Constant(native);
Node* deep_const_context = t.jsgraph()->Constant(subcontext2);
Node* param_context = t.NewNode(t.common()->Parameter(0), start);
- JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+ JSContextSpecializer spec(t.jsgraph());
{
// Mutable slot, constant context, depth = 0 => do nothing.
@@ -134,7 +132,7 @@ TEST(ReduceJSStoreContext) {
Node* const_context = t.jsgraph()->Constant(native);
Node* deep_const_context = t.jsgraph()->Constant(subcontext2);
Node* param_context = t.NewNode(t.common()->Parameter(0), start);
- JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+ JSContextSpecializer spec(t.jsgraph());
{
// Mutable slot, constant context, depth = 0 => do nothing.
@@ -196,11 +194,10 @@ TEST(SpecializeToContext) {
Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
const int slot = Context::GLOBAL_OBJECT_INDEX;
native->set(slot, *expected);
- t.info()->SetContext(native);
Node* const_context = t.jsgraph()->Constant(native);
Node* param_context = t.NewNode(t.common()->Parameter(0), start);
- JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+ JSContextSpecializer spec(t.jsgraph());
{
// Check that specialization replaces values and forwards effects
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 3023837f4c..f4531d3e83 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -6,8 +6,9 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/typer.h"
#include "test/cctest/cctest.h"
@@ -25,7 +26,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
simplified(main_zone()),
common(main_zone()),
graph(main_zone()),
- typer(&graph, MaybeHandle<Context>()),
+ typer(main_isolate(), &graph, MaybeHandle<Context>()),
context_node(NULL) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
graph.SetEnd(graph.NewNode(common.End()));
@@ -75,7 +76,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
}
Node* reduce(Node* node) {
- JSGraph jsgraph(&graph, &common, &javascript, &machine);
+ JSGraph jsgraph(main_isolate(), &graph, &common, &javascript, &machine);
JSTypedLowering reducer(&jsgraph, main_zone());
Reduction reduction = reducer.Reduce(node);
if (reduction.Changed()) return reduction.replacement();
@@ -113,18 +114,33 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* Binop(const Operator* op, Node* left, Node* right) {
// JS binops also require context, effect, and control
- return graph.NewNode(op, left, right, context(), start(), control());
+ if (OperatorProperties::HasFrameStateInput(op)) {
+ return graph.NewNode(op, left, right, context(),
+ EmptyFrameState(context()), start(), control());
+ } else {
+ return graph.NewNode(op, left, right, context(), start(), control());
+ }
}
Node* Unop(const Operator* op, Node* input) {
// JS unops also require context, effect, and control
- return graph.NewNode(op, input, context(), start(), control());
+ if (OperatorProperties::HasFrameStateInput(op)) {
+ return graph.NewNode(op, input, context(), EmptyFrameState(context()),
+ start(), control());
+ } else {
+ return graph.NewNode(op, input, context(), start(), control());
+ }
}
Node* UseForEffect(Node* node) {
// TODO(titzer): use EffectPhi after fixing EffectCount
- return graph.NewNode(javascript.ToNumber(), node, context(), node,
- control());
+ if (OperatorProperties::HasFrameStateInput(javascript.ToNumber())) {
+ return graph.NewNode(javascript.ToNumber(), node, context(),
+ EmptyFrameState(context()), node, control());
+ } else {
+ return graph.NewNode(javascript.ToNumber(), node, context(), node,
+ control());
+ }
}
void CheckEffectInput(Node* effect, Node* use) {
@@ -166,20 +182,17 @@ static Type* kStringTypes[] = {Type::InternalizedString(), Type::OtherString(),
Type::String()};
-static Type* kInt32Types[] = {
- Type::UnsignedSmall(), Type::NegativeSigned32(),
- Type::NonNegativeSigned32(), Type::SignedSmall(),
- Type::Signed32(), Type::Unsigned32(),
- Type::Integral32()};
+static Type* kInt32Types[] = {Type::UnsignedSmall(), Type::Negative32(),
+ Type::Unsigned31(), Type::SignedSmall(),
+ Type::Signed32(), Type::Unsigned32(),
+ Type::Integral32()};
static Type* kNumberTypes[] = {
- Type::UnsignedSmall(), Type::NegativeSigned32(),
- Type::NonNegativeSigned32(), Type::SignedSmall(),
- Type::Signed32(), Type::Unsigned32(),
- Type::Integral32(), Type::MinusZero(),
- Type::NaN(), Type::OrderedNumber(),
- Type::PlainNumber(), Type::Number()};
+ Type::UnsignedSmall(), Type::Negative32(), Type::Unsigned31(),
+ Type::SignedSmall(), Type::Signed32(), Type::Unsigned32(),
+ Type::Integral32(), Type::MinusZero(), Type::NaN(),
+ Type::OrderedNumber(), Type::PlainNumber(), Type::Number()};
static Type* kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
@@ -306,13 +319,12 @@ class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
TEST(Int32BitwiseShifts) {
JSBitwiseShiftTypedLoweringTester R;
- Type* types[] = {Type::SignedSmall(), Type::UnsignedSmall(),
- Type::NegativeSigned32(), Type::NonNegativeSigned32(),
- Type::Unsigned32(), Type::Signed32(),
- Type::MinusZero(), Type::NaN(),
- Type::Undefined(), Type::Null(),
- Type::Boolean(), Type::Number(),
- Type::PlainNumber(), Type::String()};
+ Type* types[] = {
+ Type::SignedSmall(), Type::UnsignedSmall(), Type::Negative32(),
+ Type::Unsigned31(), Type::Unsigned32(), Type::Signed32(),
+ Type::MinusZero(), Type::NaN(), Type::Undefined(),
+ Type::Null(), Type::Boolean(), Type::Number(),
+ Type::PlainNumber(), Type::String()};
for (size_t i = 0; i < arraysize(types); ++i) {
Node* p0 = R.Parameter(types[i], 0);
@@ -737,12 +749,25 @@ TEST(RemoveToNumberEffects) {
switch (i) {
case 0:
+ // TODO(jarin) Replace with a query of FLAG_turbo_deoptimization.
+ if (OperatorProperties::HasFrameStateInput(R.javascript.ToNumber())) {
+ effect_use = R.graph.NewNode(R.javascript.ToNumber(), p0, R.context(),
+ frame_state, ton, R.start());
+ } else {
effect_use = R.graph.NewNode(R.javascript.ToNumber(), p0, R.context(),
ton, R.start());
+ }
break;
case 1:
- effect_use = R.graph.NewNode(R.javascript.ToNumber(), ton, R.context(),
- ton, R.start());
+ // TODO(jarin) Replace with a query of FLAG_turbo_deoptimization.
+ if (OperatorProperties::HasFrameStateInput(R.javascript.ToNumber())) {
+ effect_use =
+ R.graph.NewNode(R.javascript.ToNumber(), ton, R.context(),
+ frame_state, ton, R.start());
+ } else {
+ effect_use = R.graph.NewNode(R.javascript.ToNumber(), ton,
+ R.context(), ton, R.start());
+ }
break;
case 2:
effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
@@ -777,7 +802,7 @@ TEST(RemoveToNumberEffects) {
}
}
- CHECK_EQ(NULL, effect_use); // should have done all cases above.
+ CHECK(!effect_use); // should have done all cases above.
}
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 74bf43d242..d9de18efad 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -20,7 +20,7 @@ class TestCode : public HandleAndZoneScope {
TestCode()
: HandleAndZoneScope(),
blocks_(main_zone()),
- sequence_(main_zone(), &blocks_),
+ sequence_(main_isolate(), main_zone(), &blocks_),
rpo_number_(RpoNumber::FromInt(0)),
current_(NULL) {}
@@ -31,7 +31,7 @@ class TestCode : public HandleAndZoneScope {
int Jump(int target) {
Start();
- InstructionOperand* ops[] = {UseRpo(target)};
+ InstructionOperand ops[] = {UseRpo(target)};
sequence_.AddInstruction(Instruction::New(main_zone(), kArchJmp, 0, NULL, 1,
ops, 0, NULL)->MarkAsControl());
int pos = static_cast<int>(sequence_.instructions().size() - 1);
@@ -44,7 +44,7 @@ class TestCode : public HandleAndZoneScope {
}
int Branch(int ttarget, int ftarget) {
Start();
- InstructionOperand* ops[] = {UseRpo(ttarget), UseRpo(ftarget)};
+ InstructionOperand ops[] = {UseRpo(ttarget), UseRpo(ftarget)};
InstructionCode code = 119 | FlagsModeField::encode(kFlags_branch) |
FlagsConditionField::encode(kEqual);
sequence_.AddInstruction(Instruction::New(main_zone(), code, 0, NULL, 2,
@@ -60,16 +60,16 @@ class TestCode : public HandleAndZoneScope {
void RedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
- int index = static_cast<int>(sequence_.instructions().size()) - 1;
- sequence_.AddGapMove(index, RegisterOperand::Create(13, main_zone()),
- RegisterOperand::Create(13, main_zone()));
+ int index = static_cast<int>(sequence_.instructions().size()) - 2;
+ sequence_.AddGapMove(index, RegisterOperand::New(13, main_zone()),
+ RegisterOperand::New(13, main_zone()));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
- int index = static_cast<int>(sequence_.instructions().size()) - 1;
- sequence_.AddGapMove(index, ImmediateOperand::Create(11, main_zone()),
- RegisterOperand::Create(11, main_zone()));
+ int index = static_cast<int>(sequence_.instructions().size()) - 2;
+ sequence_.AddGapMove(index, ImmediateOperand::New(11, main_zone()),
+ RegisterOperand::New(11, main_zone()));
}
void Other() {
Start();
@@ -81,9 +81,9 @@ class TestCode : public HandleAndZoneScope {
current_ = NULL;
rpo_number_ = RpoNumber::FromInt(rpo_number_.ToInt() + 1);
}
- InstructionOperand* UseRpo(int num) {
+ InstructionOperand UseRpo(int num) {
int index = sequence_.AddImmediate(Constant(RpoNumber::FromInt(num)));
- return ImmediateOperand::Create(index, main_zone());
+ return ImmediateOperand(index);
}
void Start(bool deferred = false) {
if (current_ == NULL) {
@@ -102,7 +102,7 @@ class TestCode : public HandleAndZoneScope {
void VerifyForwarding(TestCode& code, int count, int* expected) {
- Zone local_zone(code.main_isolate());
+ Zone local_zone;
ZoneVector<RpoNumber> result(&local_zone);
JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_);
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 117caf22d8..13695b2e0b 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/zone.h"
@@ -32,9 +33,9 @@ static Handle<JSFunction> Compile(const char* source) {
->NewStringFromUtf8(CStrVector(source))
.ToHandleChecked();
Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
- source_code, Handle<String>(), 0, 0, false,
+ source_code, Handle<String>(), 0, 0, false, false,
Handle<Context>(isolate->native_context()), NULL, NULL,
- v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
+ v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE, false);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_function, isolate->native_context());
}
@@ -44,7 +45,8 @@ TEST(TestLinkageCreate) {
InitializedHandleScope handles;
Handle<JSFunction> function = Compile("a + b");
CompilationInfoWithZone info(function);
- Linkage linkage(info.zone(), &info);
+ CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
+ CHECK(descriptor);
}
@@ -59,10 +61,8 @@ TEST(TestLinkageJSFunctionIncoming) {
Handle<JSFunction> function = v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(CompileRun(sources[i])));
CompilationInfoWithZone info(function);
- Linkage linkage(info.zone(), &info);
-
- CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
- CHECK_NE(NULL, descriptor);
+ CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
+ CHECK(descriptor);
CHECK_EQ(1 + i, static_cast<int>(descriptor->JSParameterCount()));
CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
@@ -74,11 +74,14 @@ TEST(TestLinkageJSFunctionIncoming) {
TEST(TestLinkageCodeStubIncoming) {
Isolate* isolate = CcTest::InitIsolateOnce();
- CompilationInfoWithZone info(static_cast<HydrogenCodeStub*>(NULL), isolate);
- Linkage linkage(info.zone(), &info);
- // TODO(titzer): test linkage creation with a bonafide code stub.
- // this just checks current behavior.
- CHECK_EQ(NULL, linkage.GetIncomingDescriptor());
+ ToNumberStub stub(isolate);
+ CompilationInfoWithZone info(&stub, isolate);
+ CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
+ CHECK(descriptor);
+ CHECK_EQ(1, static_cast<int>(descriptor->JSParameterCount()));
+ CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
+ CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+ CHECK_EQ(false, descriptor->IsJSFunctionCall());
}
@@ -86,12 +89,11 @@ TEST(TestLinkageJSCall) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
CompilationInfoWithZone info(function);
- Linkage linkage(info.zone(), &info);
for (int i = 0; i < 32; i++) {
- CallDescriptor* descriptor =
- linkage.GetJSCallDescriptor(i, CallDescriptor::kNoFlags);
- CHECK_NE(NULL, descriptor);
+ CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
+ info.zone(), false, i, CallDescriptor::kNoFlags);
+ CHECK(descriptor);
CHECK_EQ(i, static_cast<int>(descriptor->JSParameterCount()));
CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
CHECK_EQ(Operator::kNoProperties, descriptor->properties());
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index 9c112681c2..06682ef5a9 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -39,7 +39,7 @@ class LoopFinderTester : HandleAndZoneScope {
: isolate(main_isolate()),
common(main_zone()),
graph(main_zone()),
- jsgraph(&graph, &common, NULL, NULL),
+ jsgraph(main_isolate(), &graph, &common, NULL, NULL),
start(graph.NewNode(common.Start(1))),
end(graph.NewNode(common.End(), start)),
p0(graph.NewNode(common.Parameter(0), start)),
@@ -127,7 +127,7 @@ class LoopFinderTester : HandleAndZoneScope {
OFStream os(stdout);
os << AsRPO(graph);
}
- Zone zone(isolate);
+ Zone zone;
loop_tree = LoopFinder::BuildLoopTree(&graph, &zone);
}
return loop_tree;
@@ -136,7 +136,7 @@ class LoopFinderTester : HandleAndZoneScope {
void CheckLoop(Node** header, int header_count, Node** body, int body_count) {
LoopTree* tree = GetLoopTree();
LoopTree::Loop* loop = tree->ContainingLoop(header[0]);
- CHECK_NE(NULL, loop);
+ CHECK(loop);
CHECK(header_count == static_cast<int>(loop->HeaderSize()));
for (int i = 0; i < header_count; i++) {
@@ -146,6 +146,7 @@ class LoopFinderTester : HandleAndZoneScope {
}
CHECK_EQ(body_count, static_cast<int>(loop->BodySize()));
+ // TODO(turbofan): O(n^2) set equivalence in this test.
for (int i = 0; i < body_count; i++) {
// Each body node should be contained in the loop.
CHECK(tree->Contains(loop, body[i]));
@@ -154,7 +155,6 @@ class LoopFinderTester : HandleAndZoneScope {
}
void CheckRangeContains(NodeRange range, Node* node) {
- // O(n) ftw.
CHECK_NE(range.end(), std::find(range.begin(), range.end(), node));
}
@@ -164,7 +164,7 @@ class LoopFinderTester : HandleAndZoneScope {
Node* header = chain[i];
// Each header should be in a loop.
LoopTree::Loop* loop = tree->ContainingLoop(header);
- CHECK_NE(NULL, loop);
+ CHECK(loop);
// Check parentage.
LoopTree::Loop* parent =
i == 0 ? NULL : tree->ContainingLoop(chain[i - 1]);
@@ -178,6 +178,8 @@ class LoopFinderTester : HandleAndZoneScope {
}
}
}
+
+ Zone* zone() { return main_zone(); }
};
@@ -262,6 +264,23 @@ TEST(LaLoop1) {
}
+TEST(LaLoop1phi) {
+ // One loop with a simple phi.
+ LoopFinderTester t;
+ While w(t, t.p0);
+ Node* phi =
+ t.graph.NewNode(t.common.Phi(kMachAnyTagged, 2), t.zero, t.one, w.loop);
+ t.Return(phi, t.start, w.exit);
+
+ Node* chain[] = {w.loop};
+ t.CheckNestedLoops(chain, 1);
+
+ Node* header[] = {w.loop, phi};
+ Node* body[] = {w.branch, w.if_true};
+ t.CheckLoop(header, 2, body, 2);
+}
+
+
TEST(LaLoop1c) {
// One loop with a counter.
LoopFinderTester t;
@@ -451,6 +470,41 @@ TEST(LaNestedLoop1c) {
}
+TEST(LaNestedLoop1x) {
+ // One loop nested in another.
+ LoopFinderTester t;
+ While w1(t, t.p0);
+ While w2(t, t.p0);
+ w2.nest(w1);
+
+ const Operator* op = t.common.Phi(kMachInt32, 2);
+ Node* p1a = t.graph.NewNode(op, t.p0, t.p0, w1.loop);
+ Node* p1b = t.graph.NewNode(op, t.p0, t.p0, w1.loop);
+ Node* p2a = t.graph.NewNode(op, p1a, t.p0, w2.loop);
+ Node* p2b = t.graph.NewNode(op, p1b, t.p0, w2.loop);
+
+ p1a->ReplaceInput(1, p2b);
+ p1b->ReplaceInput(1, p2a);
+
+ p2a->ReplaceInput(1, p2b);
+ p2b->ReplaceInput(1, p2a);
+
+ t.Return(t.p0, p1a, w1.exit);
+
+ Node* chain[] = {w1.loop, w2.loop};
+ t.CheckNestedLoops(chain, 2);
+
+ Node* h1[] = {w1.loop, p1a, p1b};
+ Node* b1[] = {w1.branch, w1.if_true, w2.loop, p2a,
+ p2b, w2.branch, w2.if_true, w2.exit};
+ t.CheckLoop(h1, 3, b1, 8);
+
+ Node* h2[] = {w2.loop, p2a, p2b};
+ Node* b2[] = {w2.branch, w2.if_true};
+ t.CheckLoop(h2, 3, b2, 2);
+}
+
+
TEST(LaNestedLoop2) {
// Two loops nested in an outer loop.
LoopFinderTester t;
@@ -822,7 +876,7 @@ void RunEdgeMatrix3(int c1a, int c1b, int c1c, // line break
// Runs all combinations with a fixed {i}.
-void RunEdgeMatrix3_i(int i) {
+static void RunEdgeMatrix3_i(int i) {
for (int a = 0; a < 1; a++) {
for (int b = 0; b < 1; b++) {
for (int c = 0; c < 4; c++) {
@@ -860,3 +914,102 @@ TEST(LaEdgeMatrix3_4) { RunEdgeMatrix3_i(4); }
TEST(LaEdgeMatrix3_5) { RunEdgeMatrix3_i(5); }
+
+
+static void RunManyChainedLoops_i(int count) {
+ LoopFinderTester t;
+ Node** nodes = t.zone()->NewArray<Node*>(count * 4);
+ Node* k11 = t.jsgraph.Int32Constant(11);
+ Node* k12 = t.jsgraph.Int32Constant(12);
+ Node* last = t.start;
+
+ // Build loops.
+ for (int i = 0; i < count; i++) {
+ Node* loop = t.graph.NewNode(t.common.Loop(2), last, t.start);
+ Node* phi = t.graph.NewNode(t.common.Phi(kMachInt32, 2), k11, k12, loop);
+ Node* branch = t.graph.NewNode(t.common.Branch(), phi, loop);
+ Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
+ Node* exit = t.graph.NewNode(t.common.IfFalse(), branch);
+ loop->ReplaceInput(1, if_true);
+
+ nodes[i * 4 + 0] = loop;
+ nodes[i * 4 + 1] = phi;
+ nodes[i * 4 + 2] = branch;
+ nodes[i * 4 + 3] = if_true;
+
+ last = exit;
+ }
+
+ Node* ret = t.graph.NewNode(t.common.Return(), t.p0, t.start, last);
+ t.graph.SetEnd(ret);
+
+ // Verify loops.
+ for (int i = 0; i < count; i++) {
+ t.CheckLoop(nodes + i * 4, 2, nodes + i * 4 + 2, 2);
+ }
+}
+
+
+static void RunManyNestedLoops_i(int count) {
+ LoopFinderTester t;
+ Node** nodes = t.zone()->NewArray<Node*>(count * 5);
+ Node* k11 = t.jsgraph.Int32Constant(11);
+ Node* k12 = t.jsgraph.Int32Constant(12);
+ Node* outer = nullptr;
+ Node* entry = t.start;
+
+ // Build loops.
+ for (int i = 0; i < count; i++) {
+ Node* loop = t.graph.NewNode(t.common.Loop(2), entry, t.start);
+ Node* phi = t.graph.NewNode(t.common.Phi(kMachInt32, 2), k11, k12, loop);
+ Node* branch = t.graph.NewNode(t.common.Branch(), phi, loop);
+ Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
+ Node* exit = t.graph.NewNode(t.common.IfFalse(), branch);
+
+ nodes[i * 5 + 0] = exit; // outside
+ nodes[i * 5 + 1] = loop; // header
+ nodes[i * 5 + 2] = phi; // header
+ nodes[i * 5 + 3] = branch; // body
+ nodes[i * 5 + 4] = if_true; // body
+
+ if (outer != nullptr) {
+ // inner loop.
+ outer->ReplaceInput(1, exit);
+ } else {
+ // outer loop.
+ Node* ret = t.graph.NewNode(t.common.Return(), t.p0, t.start, exit);
+ t.graph.SetEnd(ret);
+ }
+ outer = loop;
+ entry = if_true;
+ }
+ outer->ReplaceInput(1, entry); // innermost loop.
+
+ // Verify loops.
+ for (int i = 0; i < count; i++) {
+ int k = i * 5;
+ t.CheckLoop(nodes + k + 1, 2, nodes + k + 3, count * 5 - k - 3);
+ }
+}
+
+
+TEST(LaManyChained_30) { RunManyChainedLoops_i(30); }
+TEST(LaManyChained_31) { RunManyChainedLoops_i(31); }
+TEST(LaManyChained_32) { RunManyChainedLoops_i(32); }
+TEST(LaManyChained_33) { RunManyChainedLoops_i(33); }
+TEST(LaManyChained_34) { RunManyChainedLoops_i(34); }
+TEST(LaManyChained_62) { RunManyChainedLoops_i(62); }
+TEST(LaManyChained_63) { RunManyChainedLoops_i(63); }
+TEST(LaManyChained_64) { RunManyChainedLoops_i(64); }
+
+TEST(LaManyNested_30) { RunManyNestedLoops_i(30); }
+TEST(LaManyNested_31) { RunManyNestedLoops_i(31); }
+TEST(LaManyNested_32) { RunManyNestedLoops_i(32); }
+TEST(LaManyNested_33) { RunManyNestedLoops_i(33); }
+TEST(LaManyNested_34) { RunManyNestedLoops_i(34); }
+TEST(LaManyNested_62) { RunManyNestedLoops_i(62); }
+TEST(LaManyNested_63) { RunManyNestedLoops_i(63); }
+TEST(LaManyNested_64) { RunManyNestedLoops_i(64); }
+
+
+TEST(LaPhiTangle) { LoopFinderTester t; }
diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
index aabd95bc23..7513307bab 100644
--- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -31,24 +31,24 @@ struct TestHelper : public HandleAndZoneScope {
// TODO(titzer): don't scope analyze every single time.
CompilationInfo info(function, main_zone());
- CHECK(Parser::Parse(&info));
+ CHECK(Parser::ParseStatic(&info));
CHECK(Rewriter::Rewrite(&info));
CHECK(Scope::Analyze(&info));
Scope* scope = info.function()->scope();
AstValueFactory* factory = info.ast_value_factory();
- CHECK_NE(NULL, scope);
+ CHECK(scope);
if (result == NULL) {
AstLoopAssignmentAnalyzer analyzer(main_zone(), &info);
result = analyzer.Analyze();
- CHECK_NE(NULL, result);
+ CHECK(result);
}
const i::AstRawString* name = factory->GetOneByteString(var_name);
i::Variable* var = scope->Lookup(name);
- CHECK_NE(NULL, var);
+ CHECK(var);
if (var->location() == Variable::UNALLOCATED) {
CHECK_EQ(0, expected);
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index 648e1b923a..7ee5751875 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -61,8 +61,8 @@ class ReducerTester : public HandleAndZoneScope {
common(main_zone()),
graph(main_zone()),
javascript(main_zone()),
- typer(&graph, MaybeHandle<Context>()),
- jsgraph(&graph, &common, &javascript, &machine),
+ typer(isolate, &graph, MaybeHandle<Context>()),
+ jsgraph(isolate, &graph, &common, &javascript, &machine),
maxuint32(Constant<int32_t>(kMaxUInt32)) {
Node* s = graph.NewNode(common.Start(num_parameters));
graph.SetStart(s);
@@ -100,7 +100,7 @@ class ReducerTester : public HandleAndZoneScope {
// the {expect} value.
template <typename T>
void CheckFoldBinop(volatile T expect, Node* a, Node* b) {
- CHECK_NE(NULL, binop);
+ CHECK(binop);
Node* n = CreateBinopNode(a, b);
MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
@@ -112,7 +112,7 @@ class ReducerTester : public HandleAndZoneScope {
// Check that the reduction of this binop applied to {a} and {b} yields
// the {expect} node.
void CheckBinop(Node* expect, Node* a, Node* b) {
- CHECK_NE(NULL, binop);
+ CHECK(binop);
Node* n = CreateBinopNode(a, b);
MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
@@ -124,7 +124,7 @@ class ReducerTester : public HandleAndZoneScope {
// this binop applied to {left_expect} and {right_expect}.
void CheckFoldBinop(Node* left_expect, Node* right_expect, Node* left,
Node* right) {
- CHECK_NE(NULL, binop);
+ CHECK(binop);
Node* n = CreateBinopNode(left, right);
MachineOperatorReducer reducer(&jsgraph);
Reduction reduction = reducer.Reduce(n);
@@ -139,7 +139,7 @@ class ReducerTester : public HandleAndZoneScope {
template <typename T>
void CheckFoldBinop(volatile T left_expect, const Operator* op_expect,
Node* right_expect, Node* left, Node* right) {
- CHECK_NE(NULL, binop);
+ CHECK(binop);
Node* n = CreateBinopNode(left, right);
MachineOperatorReducer reducer(&jsgraph);
Reduction r = reducer.Reduce(n);
@@ -154,7 +154,7 @@ class ReducerTester : public HandleAndZoneScope {
template <typename T>
void CheckFoldBinop(Node* left_expect, const Operator* op_expect,
volatile T right_expect, Node* left, Node* right) {
- CHECK_NE(NULL, binop);
+ CHECK(binop);
Node* n = CreateBinopNode(left, right);
MachineOperatorReducer reducer(&jsgraph);
Reduction r = reducer.Reduce(n);
@@ -723,133 +723,6 @@ TEST(ReduceLoadStore) {
}
-static void CheckNans(ReducerTester* R) {
- Node* x = R->Parameter();
- std::vector<double> nans = ValueHelper::nan_vector();
- for (std::vector<double>::const_iterator pl = nans.begin(); pl != nans.end();
- ++pl) {
- for (std::vector<double>::const_iterator pr = nans.begin();
- pr != nans.end(); ++pr) {
- Node* nan1 = R->Constant<double>(*pl);
- Node* nan2 = R->Constant<double>(*pr);
- R->CheckBinop(nan1, x, nan1); // x op NaN => NaN
- R->CheckBinop(nan1, nan1, x); // NaN op x => NaN
- R->CheckBinop(nan1, nan2, nan1); // NaN op NaN => NaN
- }
- }
-}
-
-
-TEST(ReduceFloat64Add) {
- ReducerTester R;
- R.binop = R.machine.Float64Add();
-
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double x = *pl, y = *pr;
- R.CheckFoldBinop<double>(x + y, x, y);
- }
- }
-
- FOR_FLOAT64_INPUTS(i) {
- Double tmp(*i);
- if (!tmp.IsSpecial() || tmp.IsInfinite()) {
- // Don't check NaNs as they are reduced more.
- R.CheckPutConstantOnRight(*i);
- }
- }
-
- CheckNans(&R);
-}
-
-
-TEST(ReduceFloat64Sub) {
- ReducerTester R;
- R.binop = R.machine.Float64Sub();
-
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double x = *pl, y = *pr;
- R.CheckFoldBinop<double>(x - y, x, y);
- }
- }
-
- Node* zero = R.Constant<double>(0.0);
- Node* x = R.Parameter();
-
- R.CheckBinop(x, x, zero); // x - 0.0 => x
-
- CheckNans(&R);
-}
-
-
-TEST(ReduceFloat64Mul) {
- ReducerTester R;
- R.binop = R.machine.Float64Mul();
-
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double x = *pl, y = *pr;
- R.CheckFoldBinop<double>(x * y, x, y);
- }
- }
-
- double inf = V8_INFINITY;
- R.CheckPutConstantOnRight(-inf);
- R.CheckPutConstantOnRight(-0.1);
- R.CheckPutConstantOnRight(0.1);
- R.CheckPutConstantOnRight(inf);
-
- Node* x = R.Parameter();
- Node* one = R.Constant<double>(1.0);
-
- R.CheckBinop(x, x, one); // x * 1.0 => x
- R.CheckBinop(x, one, x); // 1.0 * x => x
-
- CheckNans(&R);
-}
-
-
-TEST(ReduceFloat64Div) {
- ReducerTester R;
- R.binop = R.machine.Float64Div();
-
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double x = *pl, y = *pr;
- R.CheckFoldBinop<double>(x / y, x, y);
- }
- }
-
- Node* x = R.Parameter();
- Node* one = R.Constant<double>(1.0);
-
- R.CheckBinop(x, x, one); // x / 1.0 => x
-
- CheckNans(&R);
-}
-
-
-TEST(ReduceFloat64Mod) {
- ReducerTester R;
- R.binop = R.machine.Float64Mod();
-
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- double x = *pl, y = *pr;
- R.CheckFoldBinop<double>(modulo(x, y), x, y);
- }
- }
-
- Node* x = R.Parameter();
- Node* zero = R.Constant<double>(0.0);
-
- R.CheckFoldBinop<double>(v8::base::OS::nan_value(), x, zero);
-
- CheckNans(&R);
-}
-
-
// TODO(titzer): test MachineOperatorReducer for Word64And
// TODO(titzer): test MachineOperatorReducer for Word64Or
// TODO(titzer): test MachineOperatorReducer for Word64Xor
@@ -870,3 +743,8 @@ TEST(ReduceFloat64Mod) {
// TODO(titzer): test MachineOperatorReducer for ChangeInt32ToFloat64
// TODO(titzer): test MachineOperatorReducer for ChangeFloat64ToInt32
// TODO(titzer): test MachineOperatorReducer for Float64Compare
+// TODO(titzer): test MachineOperatorReducer for Float64Add
+// TODO(titzer): test MachineOperatorReducer for Float64Sub
+// TODO(titzer): test MachineOperatorReducer for Float64Mul
+// TODO(titzer): test MachineOperatorReducer for Float64Div
+// TODO(titzer): test MachineOperatorReducer for Float64Mod
diff --git a/deps/v8/test/cctest/compiler/test-node-cache.cc b/deps/v8/test/cctest/compiler/test-node-cache.cc
index a48adb9acc..b11e859cbc 100644
--- a/deps/v8/test/cctest/compiler/test-node-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-node-cache.cc
@@ -17,7 +17,7 @@ TEST(Int32Constant_back_to_back) {
for (int i = -2000000000; i < 2000000000; i += 3315177) {
Node** pos = cache.Find(graph.zone(), i);
- CHECK_NE(NULL, pos);
+ CHECK(pos);
for (int j = 0; j < 3; j++) {
Node** npos = cache.Find(graph.zone(), i);
CHECK_EQ(pos, npos);
@@ -80,7 +80,7 @@ TEST(Int64Constant_back_to_back) {
for (int64_t i = -2000000000; i < 2000000000; i += 3315177) {
Node** pos = cache.Find(graph.zone(), i);
- CHECK_NE(NULL, pos);
+ CHECK(pos);
for (int j = 0; j < 3; j++) {
Node** npos = cache.Find(graph.zone(), i);
CHECK_EQ(pos, npos);
@@ -115,7 +115,7 @@ TEST(Int64Constant_hits) {
}
-static bool Contains(NodeVector* nodes, Node* n) {
+static bool Contains(ZoneVector<Node*>* nodes, Node* n) {
for (size_t i = 0; i < nodes->size(); i++) {
if (nodes->at(i) == n) return true;
}
@@ -135,11 +135,11 @@ TEST(NodeCache_GetCachedNodes_int32) {
int32_t k = constants[i];
Node** pos = cache.Find(graph.zone(), k);
if (*pos != NULL) {
- NodeVector nodes(graph.zone());
+ ZoneVector<Node*> nodes(graph.zone());
cache.GetCachedNodes(&nodes);
CHECK(Contains(&nodes, *pos));
} else {
- NodeVector nodes(graph.zone());
+ ZoneVector<Node*> nodes(graph.zone());
Node* n = graph.NewNode(common.Int32Constant(k));
*pos = n;
cache.GetCachedNodes(&nodes);
@@ -161,11 +161,11 @@ TEST(NodeCache_GetCachedNodes_int64) {
int64_t k = constants[i];
Node** pos = cache.Find(graph.zone(), k);
if (*pos != NULL) {
- NodeVector nodes(graph.zone());
+ ZoneVector<Node*> nodes(graph.zone());
cache.GetCachedNodes(&nodes);
CHECK(Contains(&nodes, *pos));
} else {
- NodeVector nodes(graph.zone());
+ ZoneVector<Node*> nodes(graph.zone());
Node* n = graph.NewNode(common.Int64Constant(k));
*pos = n;
cache.GetCachedNodes(&nodes);
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index eafabd3578..2c51e26f86 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -16,136 +16,26 @@ using namespace v8::internal::compiler;
static Operator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
"dummy", 0, 0, 0, 1, 0, 0);
-TEST(NodeAllocation) {
- GraphTester graph;
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
- CHECK(n2->id() != n1->id());
-}
-
-
-TEST(NodeWithOpcode) {
- GraphTester graph;
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
- CHECK(n1->op() == &dummy_operator);
- CHECK(n2->op() == &dummy_operator);
-}
-
-
-TEST(NodeInputs1) {
- GraphTester graph;
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
- CHECK_EQ(1, n2->InputCount());
- CHECK(n0 == n2->InputAt(0));
-}
-
-
-TEST(NodeInputs2) {
- GraphTester graph;
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
- CHECK_EQ(2, n2->InputCount());
- CHECK(n0 == n2->InputAt(0));
- CHECK(n1 == n2->InputAt(1));
-}
-
-
-TEST(NodeInputs3) {
- GraphTester graph;
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1, n1);
- CHECK_EQ(3, n2->InputCount());
- CHECK(n0 == n2->InputAt(0));
- CHECK(n1 == n2->InputAt(1));
- CHECK(n1 == n2->InputAt(2));
-}
-
-
-TEST(NodeInputIteratorEmpty) {
- GraphTester graph;
- Node* n1 = graph.NewNode(&dummy_operator);
- Node::Inputs::iterator i(n1->inputs().begin());
- int input_count = 0;
- for (; i != n1->inputs().end(); ++i) {
- input_count++;
- }
- CHECK_EQ(0, input_count);
-}
-
-
-TEST(NodeInputIteratorOne) {
- GraphTester graph;
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node::Inputs::iterator i(n1->inputs().begin());
- CHECK_EQ(1, n1->InputCount());
- CHECK_EQ(n0, *i);
- ++i;
- CHECK(n1->inputs().end() == i);
-}
-
-
-TEST(NodeUseIteratorEmpty) {
- GraphTester graph;
- Node* n1 = graph.NewNode(&dummy_operator);
- int use_count = 0;
- for (Edge const edge : n1->use_edges()) {
- USE(edge);
- use_count++;
- }
- CHECK_EQ(0, use_count);
-}
-
-
-TEST(NodeUseIteratorOne) {
- GraphTester graph;
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node::Uses::iterator i(n0->uses().begin());
- CHECK_EQ(n1, *i);
- ++i;
- CHECK(n0->uses().end() == i);
-}
-
-
-TEST(NodeUseIteratorReplaceNoUses) {
- GraphTester graph;
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
- Node* n3 = graph.NewNode(&dummy_operator, n2);
- n0->ReplaceUses(n1);
- CHECK(n0->uses().begin() == n0->uses().end());
- n0->ReplaceUses(n2);
- CHECK(n0->uses().begin() == n0->uses().end());
- USE(n3);
-}
-
-
TEST(NodeUseIteratorReplaceUses) {
GraphTester graph;
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator, n0);
Node* n2 = graph.NewNode(&dummy_operator, n0);
Node* n3 = graph.NewNode(&dummy_operator);
- Node::Uses::iterator i1(n0->uses().begin());
+ auto i1(n0->uses().begin());
CHECK_EQ(n1, *i1);
++i1;
CHECK_EQ(n2, *i1);
n0->ReplaceUses(n3);
- Node::Uses::iterator i2(n3->uses().begin());
+ auto i2(n3->uses().begin());
CHECK_EQ(n1, *i2);
++i2;
CHECK_EQ(n2, *i2);
- Node::Inputs::iterator i3(n1->inputs().begin());
+ auto i3(n1->inputs().begin());
CHECK_EQ(n3, *i3);
++i3;
CHECK(n1->inputs().end() == i3);
- Node::Inputs::iterator i4(n2->inputs().begin());
+ auto i4(n2->inputs().begin());
CHECK_EQ(n3, *i4);
++i4;
CHECK(n2->inputs().end() == i4);
@@ -160,14 +50,14 @@ TEST(NodeUseIteratorReplaceUsesSelf) {
n1->ReplaceInput(0, n1); // Create self-reference.
- Node::Uses::iterator i1(n1->uses().begin());
+ auto i1(n1->uses().begin());
CHECK_EQ(n1, *i1);
n1->ReplaceUses(n3);
CHECK(n1->uses().begin() == n1->uses().end());
- Node::Uses::iterator i2(n3->uses().begin());
+ auto i2(n3->uses().begin());
CHECK_EQ(n1, *i2);
++i2;
CHECK(n1->uses().end() == i2);
@@ -180,7 +70,7 @@ TEST(ReplaceInput) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator);
Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
- Node::Inputs::iterator i1(n3->inputs().begin());
+ auto i1(n3->inputs().begin());
CHECK(n0 == *i1);
CHECK_EQ(n0, n3->InputAt(0));
++i1;
@@ -192,26 +82,26 @@ TEST(ReplaceInput) {
++i1;
CHECK(i1 == n3->inputs().end());
- Node::Uses::iterator i2(n1->uses().begin());
+ auto i2(n1->uses().begin());
CHECK_EQ(n3, *i2);
++i2;
CHECK(i2 == n1->uses().end());
Node* n4 = graph.NewNode(&dummy_operator);
- Node::Uses::iterator i3(n4->uses().begin());
+ auto i3(n4->uses().begin());
CHECK(i3 == n4->uses().end());
n3->ReplaceInput(1, n4);
- Node::Uses::iterator i4(n1->uses().begin());
+ auto i4(n1->uses().begin());
CHECK(i4 == n1->uses().end());
- Node::Uses::iterator i5(n4->uses().begin());
+ auto i5(n4->uses().begin());
CHECK_EQ(n3, *i5);
++i5;
CHECK(i5 == n4->uses().end());
- Node::Inputs::iterator i6(n3->inputs().begin());
+ auto i6(n3->inputs().begin());
CHECK(n0 == *i6);
CHECK_EQ(n0, n3->InputAt(0));
++i6;
@@ -321,7 +211,7 @@ TEST(Inputs) {
// Make sure uses have been hooked op correctly.
Node::Uses uses(n4->uses());
- Node::Uses::iterator current = uses.begin();
+ auto current = uses.begin();
CHECK(current != uses.end());
CHECK(*current == n3);
++current;
@@ -450,7 +340,7 @@ TEST(ReplaceUsesFromAppendedInputs) {
CHECK_EQ(3, n3->UseCount());
Node::Uses uses(n3->uses());
- Node::Uses::iterator current = uses.begin();
+ auto current = uses.begin();
CHECK(current != uses.end());
CHECK(*current == n1);
++current;
@@ -464,76 +354,6 @@ TEST(ReplaceUsesFromAppendedInputs) {
}
-template <bool result>
-struct FixedPredicate {
- bool operator()(const Node* node) const { return result; }
-};
-
-
-TEST(ReplaceUsesIfWithFixedPredicate) {
- GraphTester graph;
-
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
- Node* n3 = graph.NewNode(&dummy_operator);
-
- CHECK_EQ(0, n2->UseCount());
- n2->ReplaceUsesIf(FixedPredicate<true>(), n1);
- CHECK_EQ(0, n2->UseCount());
- n2->ReplaceUsesIf(FixedPredicate<false>(), n1);
- CHECK_EQ(0, n2->UseCount());
-
- CHECK_EQ(0, n3->UseCount());
- n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
- CHECK_EQ(0, n3->UseCount());
- n3->ReplaceUsesIf(FixedPredicate<false>(), n1);
- CHECK_EQ(0, n3->UseCount());
-
- CHECK_EQ(2, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- n0->ReplaceUsesIf(FixedPredicate<false>(), n1);
- CHECK_EQ(2, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- n0->ReplaceUsesIf(FixedPredicate<true>(), n1);
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(2, n1->UseCount());
-
- n1->AppendInput(graph.zone(), n1);
- CHECK_EQ(3, n1->UseCount());
- n1->AppendInput(graph.zone(), n3);
- CHECK_EQ(1, n3->UseCount());
- n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
- CHECK_EQ(4, n1->UseCount());
- CHECK_EQ(0, n3->UseCount());
- n1->ReplaceUsesIf(FixedPredicate<false>(), n3);
- CHECK_EQ(4, n1->UseCount());
- CHECK_EQ(0, n3->UseCount());
-}
-
-
-TEST(ReplaceUsesIfWithEqualTo) {
- GraphTester graph;
-
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
-
- CHECK_EQ(0, n2->UseCount());
- n2->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n1), n0);
- CHECK_EQ(0, n2->UseCount());
-
- CHECK_EQ(2, n0->UseCount());
- CHECK_EQ(1, n1->UseCount());
- n1->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n0), n0);
- CHECK_EQ(2, n0->UseCount());
- CHECK_EQ(1, n1->UseCount());
- n0->ReplaceUsesIf(std::bind2nd(std::equal_to<Node*>(), n2), n1);
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(2, n1->UseCount());
-}
-
-
TEST(ReplaceInputMultipleUses) {
GraphTester graph;
@@ -812,15 +632,15 @@ TEST(RemoveAllInputs) {
n1->RemoveAllInputs();
CHECK_EQ(1, n1->InputCount());
CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(NULL, n1->InputAt(0));
+ CHECK(!n1->InputAt(0));
CHECK_EQ(1, n1->UseCount());
n2->RemoveAllInputs();
CHECK_EQ(2, n2->InputCount());
CHECK_EQ(0, n0->UseCount());
CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(NULL, n2->InputAt(0));
- CHECK_EQ(NULL, n2->InputAt(1));
+ CHECK(!n2->InputAt(0));
+ CHECK(!n2->InputAt(1));
}
{
@@ -833,6 +653,6 @@ TEST(RemoveAllInputs) {
n1->RemoveAllInputs();
CHECK_EQ(1, n1->InputCount());
CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(NULL, n1->InputAt(0));
+ CHECK(!n1->InputAt(0));
}
}
diff --git a/deps/v8/test/cctest/compiler/test-operator.cc b/deps/v8/test/cctest/compiler/test-operator.cc
index 39f660fef9..e635da797d 100644
--- a/deps/v8/test/cctest/compiler/test-operator.cc
+++ b/deps/v8/test/cctest/compiler/test-operator.cc
@@ -80,14 +80,14 @@ TEST(TestOperator_Print) {
Operator op1a(19, NONE, "Another1", 0, 0, 0, 0, 0, 0);
Operator op1b(19, FOLD, "Another2", 2, 0, 0, 2, 0, 0);
- CHECK_EQ("Another1", OperatorToString(&op1a).get());
- CHECK_EQ("Another2", OperatorToString(&op1b).get());
+ CHECK_EQ(0, strcmp("Another1", OperatorToString(&op1a).get()));
+ CHECK_EQ(0, strcmp("Another2", OperatorToString(&op1b).get()));
Operator op2a(20, NONE, "Flog1", 0, 0, 0, 0, 0, 0);
Operator op2b(20, FOLD, "Flog2", 1, 0, 0, 1, 0, 0);
- CHECK_EQ("Flog1", OperatorToString(&op2a).get());
- CHECK_EQ("Flog2", OperatorToString(&op2b).get());
+ CHECK_EQ(0, strcmp("Flog1", OperatorToString(&op2a).get()));
+ CHECK_EQ(0, strcmp("Flog2", OperatorToString(&op2b).get()));
}
@@ -148,16 +148,16 @@ TEST(TestOperator1int_Equals) {
TEST(TestOperator1int_Print) {
Operator1<int> op1(12, NONE, "Op1Test", 0, 0, 0, 1, 0, 0, 0);
- CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
+ CHECK_EQ(0, strcmp("Op1Test[0]", OperatorToString(&op1).get()));
Operator1<int> op2(12, NONE, "Op1Test", 0, 0, 0, 1, 0, 0, 66666666);
- CHECK_EQ("Op1Test[66666666]", OperatorToString(&op2).get());
+ CHECK_EQ(0, strcmp("Op1Test[66666666]", OperatorToString(&op2).get()));
Operator1<int> op3(12, NONE, "FooBar", 0, 0, 0, 1, 0, 0, 2347);
- CHECK_EQ("FooBar[2347]", OperatorToString(&op3).get());
+ CHECK_EQ(0, strcmp("FooBar[2347]", OperatorToString(&op3).get()));
Operator1<int> op4(12, NONE, "BarFoo", 0, 0, 0, 1, 0, 0, -879);
- CHECK_EQ("BarFoo[-879]", OperatorToString(&op4).get());
+ CHECK_EQ(0, strcmp("BarFoo[-879]", OperatorToString(&op4).get()));
}
@@ -179,8 +179,8 @@ TEST(TestOperator1doublePrint) {
Operator1<double> op1a(23, NONE, "Canary", 0, 0, 0, 0, 0, 0, 0.5);
Operator1<double> op1b(23, FOLD, "Finch", 2, 0, 0, 2, 0, 0, -1.5);
- CHECK_EQ("Canary[0.5]", OperatorToString(&op1a).get());
- CHECK_EQ("Finch[-1.5]", OperatorToString(&op1b).get());
+ CHECK_EQ(0, strcmp("Canary[0.5]", OperatorToString(&op1a).get()));
+ CHECK_EQ(0, strcmp("Finch[-1.5]", OperatorToString(&op1b).get()));
}
diff --git a/deps/v8/test/cctest/compiler/test-osr.cc b/deps/v8/test/cctest/compiler/test-osr.cc
new file mode 100644
index 0000000000..e3963901a5
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-osr.cc
@@ -0,0 +1,486 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/osr.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+// TODO(titzer): move this method to a common testing place.
+
+static int CheckInputs(Node* node, Node* i0 = NULL, Node* i1 = NULL,
+ Node* i2 = NULL, Node* i3 = NULL) {
+ int count = 4;
+ if (i3 == NULL) count = 3;
+ if (i2 == NULL) count = 2;
+ if (i1 == NULL) count = 1;
+ if (i0 == NULL) count = 0;
+ CHECK_EQ(count, node->InputCount());
+ if (i0 != NULL) CHECK_EQ(i0, node->InputAt(0));
+ if (i1 != NULL) CHECK_EQ(i1, node->InputAt(1));
+ if (i2 != NULL) CHECK_EQ(i2, node->InputAt(2));
+ if (i3 != NULL) CHECK_EQ(i3, node->InputAt(3));
+ return count;
+}
+
+
+static Operator kIntLt(IrOpcode::kInt32LessThan, Operator::kPure,
+ "Int32LessThan", 2, 0, 0, 1, 0, 0);
+
+
+static const int kMaxOsrValues = 10;
+
+class OsrDeconstructorTester : public HandleAndZoneScope {
+ public:
+ explicit OsrDeconstructorTester(int num_values)
+ : isolate(main_isolate()),
+ common(main_zone()),
+ graph(main_zone()),
+ jsgraph(main_isolate(), &graph, &common, NULL, NULL),
+ start(graph.NewNode(common.Start(1))),
+ p0(graph.NewNode(common.Parameter(0), start)),
+ end(graph.NewNode(common.End(), start)),
+ osr_normal_entry(graph.NewNode(common.OsrNormalEntry(), start, start)),
+ osr_loop_entry(graph.NewNode(common.OsrLoopEntry(), start, start)),
+ self(graph.NewNode(common.Int32Constant(0xaabbccdd))) {
+ CHECK(num_values <= kMaxOsrValues);
+ graph.SetStart(start);
+ for (int i = 0; i < num_values; i++) {
+ osr_values[i] = graph.NewNode(common.OsrValue(i), osr_loop_entry);
+ }
+ }
+
+ Isolate* isolate;
+ CommonOperatorBuilder common;
+ Graph graph;
+ JSGraph jsgraph;
+ Node* start;
+ Node* p0;
+ Node* end;
+ Node* osr_normal_entry;
+ Node* osr_loop_entry;
+ Node* self;
+ Node* osr_values[kMaxOsrValues];
+
+ Node* NewOsrPhi(Node* loop, Node* incoming, int osr_value, Node* back1 = NULL,
+ Node* back2 = NULL, Node* back3 = NULL) {
+ int count = 5;
+ if (back3 == NULL) count = 4;
+ if (back2 == NULL) count = 3;
+ if (back1 == NULL) count = 2;
+ CHECK_EQ(loop->InputCount(), count);
+ CHECK_EQ(osr_loop_entry, loop->InputAt(1));
+
+ Node* inputs[6];
+ inputs[0] = incoming;
+ inputs[1] = osr_values[osr_value];
+ if (count > 2) inputs[2] = back1;
+ if (count > 3) inputs[3] = back2;
+ if (count > 4) inputs[4] = back3;
+ inputs[count] = loop;
+ return graph.NewNode(common.Phi(kMachAnyTagged, count), count + 1, inputs);
+ }
+
+ Node* NewLoop(bool is_osr, int num_backedges, Node* entry = NULL) {
+ CHECK_LT(num_backedges, 4);
+ CHECK_GE(num_backedges, 0);
+ int count = 1 + num_backedges;
+ if (entry == NULL) entry = osr_normal_entry;
+ Node* inputs[5] = {entry, self, self, self, self};
+ if (is_osr) {
+ count = 2 + num_backedges;
+ inputs[1] = osr_loop_entry;
+ }
+
+ Node* loop = graph.NewNode(common.Loop(count), count, inputs);
+ for (int i = 0; i < loop->InputCount(); i++) {
+ if (loop->InputAt(i) == self) loop->ReplaceInput(i, loop);
+ }
+
+ return loop;
+ }
+
+ Node* NewOsrLoop(int num_backedges, Node* entry = NULL) {
+ return NewLoop(true, num_backedges, entry);
+ }
+
+ void DeconstructOsr() {
+ OsrHelper helper(0, 0);
+ helper.Deconstruct(&jsgraph, &common, main_zone());
+ AllNodes nodes(main_zone(), &graph);
+ // Should be edited out.
+ CHECK(!nodes.IsLive(osr_normal_entry));
+ CHECK(!nodes.IsLive(osr_loop_entry));
+ // No dangling nodes should be left over.
+ CHECK_EQ(0u, nodes.gray.size());
+ }
+};
+
+
+TEST(Deconstruct_osr0) {
+ OsrDeconstructorTester T(0);
+
+ Node* loop = T.NewOsrLoop(1);
+
+ T.graph.SetEnd(loop);
+
+ T.DeconstructOsr();
+
+ CheckInputs(loop, T.start, loop);
+}
+
+
+TEST(Deconstruct_osr1) {
+ OsrDeconstructorTester T(1);
+
+ Node* loop = T.NewOsrLoop(1);
+ Node* osr_phi =
+ T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
+
+ Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, loop);
+ T.graph.SetEnd(ret);
+
+ T.DeconstructOsr();
+
+ CheckInputs(loop, T.start, loop);
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
+ CheckInputs(ret, osr_phi, T.start, loop);
+}
+
+
+TEST(Deconstruct_osr1_type) {
+ OsrDeconstructorTester T(1);
+
+ Node* loop = T.NewOsrLoop(1);
+ Node* osr_phi =
+ T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
+ Type* type = Type::Signed32();
+ NodeProperties::SetBounds(osr_phi, Bounds(type, type));
+
+ Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, loop);
+ T.graph.SetEnd(ret);
+
+ OsrHelper helper(0, 0);
+ helper.Deconstruct(&T.jsgraph, &T.common, T.main_zone());
+
+ CHECK_EQ(type, NodeProperties::GetBounds(T.osr_values[0]).lower);
+ CHECK_EQ(type, NodeProperties::GetBounds(T.osr_values[0]).upper);
+
+ CheckInputs(loop, T.start, loop);
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
+ CheckInputs(ret, osr_phi, T.start, loop);
+}
+
+
+TEST(Deconstruct_osr_remove_prologue) {
+ OsrDeconstructorTester T(1);
+ Diamond d(&T.graph, &T.common, T.p0);
+ d.Chain(T.osr_normal_entry);
+
+ Node* loop = T.NewOsrLoop(1, d.merge);
+ Node* osr_phi =
+ T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
+
+ Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, loop);
+ T.graph.SetEnd(ret);
+
+ T.DeconstructOsr();
+
+ CheckInputs(loop, T.start, loop);
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
+ CheckInputs(ret, osr_phi, T.start, loop);
+
+ // The control before the loop should have been removed.
+ AllNodes nodes(T.main_zone(), &T.graph);
+ CHECK(!nodes.IsLive(d.branch));
+ CHECK(!nodes.IsLive(d.if_true));
+ CHECK(!nodes.IsLive(d.if_false));
+ CHECK(!nodes.IsLive(d.merge));
+}
+
+
+TEST(Deconstruct_osr_with_body1) {
+ OsrDeconstructorTester T(1);
+
+ Node* loop = T.NewOsrLoop(1);
+
+ Node* branch = T.graph.NewNode(T.common.Branch(), T.p0, loop);
+ Node* if_true = T.graph.NewNode(T.common.IfTrue(), branch);
+ Node* if_false = T.graph.NewNode(T.common.IfFalse(), branch);
+ loop->ReplaceInput(2, if_true);
+
+ Node* osr_phi =
+ T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
+
+ Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, if_false);
+ T.graph.SetEnd(ret);
+
+ T.DeconstructOsr();
+
+ CheckInputs(loop, T.start, if_true);
+ CheckInputs(branch, T.p0, loop);
+ CheckInputs(if_true, branch);
+ CheckInputs(if_false, branch);
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
+ CheckInputs(ret, osr_phi, T.start, if_false);
+}
+
+
+TEST(Deconstruct_osr_with_body2) {
+ OsrDeconstructorTester T(1);
+
+ Node* loop = T.NewOsrLoop(1);
+
+ // Two chained branches in the the body of the loop.
+ Node* branch1 = T.graph.NewNode(T.common.Branch(), T.p0, loop);
+ Node* if_true1 = T.graph.NewNode(T.common.IfTrue(), branch1);
+ Node* if_false1 = T.graph.NewNode(T.common.IfFalse(), branch1);
+
+ Node* branch2 = T.graph.NewNode(T.common.Branch(), T.p0, if_true1);
+ Node* if_true2 = T.graph.NewNode(T.common.IfTrue(), branch2);
+ Node* if_false2 = T.graph.NewNode(T.common.IfFalse(), branch2);
+ loop->ReplaceInput(2, if_true2);
+
+ Node* osr_phi =
+ T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
+
+ Node* merge = T.graph.NewNode(T.common.Merge(2), if_false1, if_false2);
+ Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, merge);
+ T.graph.SetEnd(ret);
+
+ T.DeconstructOsr();
+
+ CheckInputs(loop, T.start, if_true2);
+ CheckInputs(branch1, T.p0, loop);
+ CheckInputs(branch2, T.p0, if_true1);
+ CheckInputs(if_true1, branch1);
+ CheckInputs(if_false1, branch1);
+ CheckInputs(if_true2, branch2);
+ CheckInputs(if_false2, branch2);
+
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
+ CheckInputs(ret, osr_phi, T.start, merge);
+ CheckInputs(merge, if_false1, if_false2);
+}
+
+
+TEST(Deconstruct_osr_with_body3) {
+ OsrDeconstructorTester T(1);
+
+ Node* loop = T.NewOsrLoop(2);
+
+ // Two branches that create two different backedges.
+ Node* branch1 = T.graph.NewNode(T.common.Branch(), T.p0, loop);
+ Node* if_true1 = T.graph.NewNode(T.common.IfTrue(), branch1);
+ Node* if_false1 = T.graph.NewNode(T.common.IfFalse(), branch1);
+
+ Node* branch2 = T.graph.NewNode(T.common.Branch(), T.p0, if_true1);
+ Node* if_true2 = T.graph.NewNode(T.common.IfTrue(), branch2);
+ Node* if_false2 = T.graph.NewNode(T.common.IfFalse(), branch2);
+ loop->ReplaceInput(2, if_false1);
+ loop->ReplaceInput(3, if_true2);
+
+ Node* osr_phi =
+ T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant(),
+ T.jsgraph.ZeroConstant());
+
+ Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, if_false2);
+ T.graph.SetEnd(ret);
+
+ T.DeconstructOsr();
+
+ CheckInputs(loop, T.start, if_false1, if_true2);
+ CheckInputs(branch1, T.p0, loop);
+ CheckInputs(branch2, T.p0, if_true1);
+ CheckInputs(if_true1, branch1);
+ CheckInputs(if_false1, branch1);
+ CheckInputs(if_true2, branch2);
+ CheckInputs(if_false2, branch2);
+
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(),
+ T.jsgraph.ZeroConstant(), loop);
+ CheckInputs(ret, osr_phi, T.start, if_false2);
+}
+
+
+struct While {
+ OsrDeconstructorTester& t;
+ Node* branch;
+ Node* if_true;
+ Node* exit;
+ Node* loop;
+
+ While(OsrDeconstructorTester& R, Node* cond, bool is_osr, int backedges = 1)
+ : t(R) {
+ loop = t.NewLoop(is_osr, backedges);
+ branch = t.graph.NewNode(t.common.Branch(), cond, loop);
+ if_true = t.graph.NewNode(t.common.IfTrue(), branch);
+ exit = t.graph.NewNode(t.common.IfFalse(), branch);
+ loop->ReplaceInput(loop->InputCount() - 1, if_true);
+ }
+
+ void Nest(While& that) {
+ that.loop->ReplaceInput(that.loop->InputCount() - 1, exit);
+ this->loop->ReplaceInput(0, that.if_true);
+ }
+
+ Node* Phi(Node* i1, Node* i2, Node* i3) {
+ if (loop->InputCount() == 2) {
+ return t.graph.NewNode(t.common.Phi(kMachAnyTagged, 2), i1, i2, loop);
+ } else {
+ return t.graph.NewNode(t.common.Phi(kMachAnyTagged, 3), i1, i2, i3, loop);
+ }
+ }
+};
+
+
+static Node* FindSuccessor(Node* node, IrOpcode::Value opcode) {
+ for (Node* use : node->uses()) {
+ if (use->opcode() == opcode) return use;
+ }
+ UNREACHABLE(); // should have been found.
+ return nullptr;
+}
+
+
+TEST(Deconstruct_osr_nested1) {
+ OsrDeconstructorTester T(1);
+
+ While outer(T, T.p0, false);
+ While inner(T, T.p0, true);
+ inner.Nest(outer);
+
+ Node* outer_phi = outer.Phi(T.p0, T.p0, nullptr);
+ outer.branch->ReplaceInput(0, outer_phi);
+
+ Node* osr_phi = inner.Phi(T.jsgraph.OneConstant(), T.osr_values[0],
+ T.jsgraph.ZeroConstant());
+ inner.branch->ReplaceInput(0, osr_phi);
+ outer_phi->ReplaceInput(1, osr_phi);
+
+ Node* ret =
+ T.graph.NewNode(T.common.Return(), outer_phi, T.start, outer.exit);
+ Node* end = T.graph.NewNode(T.common.End(), ret);
+ T.graph.SetEnd(end);
+
+ T.DeconstructOsr();
+
+ // Check structure of deconstructed graph.
+ // Check inner OSR loop is directly connected to start.
+ CheckInputs(inner.loop, T.start, inner.if_true);
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), inner.loop);
+
+ // Check control transfer to copy of outer loop.
+ Node* new_outer_loop = FindSuccessor(inner.exit, IrOpcode::kLoop);
+ Node* new_outer_phi = FindSuccessor(new_outer_loop, IrOpcode::kPhi);
+ CHECK_NE(new_outer_loop, outer.loop);
+ CHECK_NE(new_outer_phi, outer_phi);
+
+ CheckInputs(new_outer_loop, inner.exit, new_outer_loop->InputAt(1));
+
+ // Check structure of outer loop.
+ Node* new_outer_branch = FindSuccessor(new_outer_loop, IrOpcode::kBranch);
+ CHECK_NE(new_outer_branch, outer.branch);
+ CheckInputs(new_outer_branch, new_outer_phi, new_outer_loop);
+ Node* new_outer_exit = FindSuccessor(new_outer_branch, IrOpcode::kIfFalse);
+ Node* new_outer_if_true = FindSuccessor(new_outer_branch, IrOpcode::kIfTrue);
+
+ // Check structure of return.
+ end = T.graph.end();
+ Node* new_ret = end->InputAt(0);
+ CHECK_EQ(IrOpcode::kReturn, new_ret->opcode());
+ CheckInputs(new_ret, new_outer_phi, T.start, new_outer_exit);
+
+ // Check structure of inner loop.
+ Node* new_inner_loop = FindSuccessor(new_outer_if_true, IrOpcode::kLoop);
+ Node* new_inner_phi = FindSuccessor(new_inner_loop, IrOpcode::kPhi);
+
+ CheckInputs(new_inner_phi, T.jsgraph.OneConstant(), T.jsgraph.ZeroConstant(),
+ new_inner_loop);
+ CheckInputs(new_outer_phi, osr_phi, new_inner_phi, new_outer_loop);
+}
+
+
+TEST(Deconstruct_osr_nested2) {
+ OsrDeconstructorTester T(1);
+
+ // Test multiple backedge outer loop.
+ While outer(T, T.p0, false, 2);
+ While inner(T, T.p0, true);
+ inner.Nest(outer);
+
+ Node* outer_phi = outer.Phi(T.p0, T.p0, T.p0);
+ outer.branch->ReplaceInput(0, outer_phi);
+
+ Node* osr_phi = inner.Phi(T.jsgraph.OneConstant(), T.osr_values[0],
+ T.jsgraph.ZeroConstant());
+ inner.branch->ReplaceInput(0, osr_phi);
+ outer_phi->ReplaceInput(1, osr_phi);
+ outer_phi->ReplaceInput(2, T.jsgraph.ZeroConstant());
+
+ Node* x_branch = T.graph.NewNode(T.common.Branch(), osr_phi, inner.exit);
+ Node* x_true = T.graph.NewNode(T.common.IfTrue(), x_branch);
+ Node* x_false = T.graph.NewNode(T.common.IfFalse(), x_branch);
+
+ outer.loop->ReplaceInput(1, x_true);
+ outer.loop->ReplaceInput(2, x_false);
+
+ Node* ret =
+ T.graph.NewNode(T.common.Return(), outer_phi, T.start, outer.exit);
+ Node* end = T.graph.NewNode(T.common.End(), ret);
+ T.graph.SetEnd(end);
+
+ T.DeconstructOsr();
+
+ // Check structure of deconstructed graph.
+ // Check inner OSR loop is directly connected to start.
+ CheckInputs(inner.loop, T.start, inner.if_true);
+ CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), inner.loop);
+
+ // Check control transfer to copy of outer loop.
+ Node* new_merge = FindSuccessor(x_true, IrOpcode::kMerge);
+ CHECK_EQ(new_merge, FindSuccessor(x_false, IrOpcode::kMerge));
+ CheckInputs(new_merge, x_true, x_false);
+
+ Node* new_outer_loop = FindSuccessor(new_merge, IrOpcode::kLoop);
+ Node* new_outer_phi = FindSuccessor(new_outer_loop, IrOpcode::kPhi);
+ CHECK_NE(new_outer_loop, outer.loop);
+ CHECK_NE(new_outer_phi, outer_phi);
+
+ Node* new_entry_phi = FindSuccessor(new_merge, IrOpcode::kPhi);
+ CheckInputs(new_entry_phi, osr_phi, T.jsgraph.ZeroConstant(), new_merge);
+
+ CHECK_EQ(new_merge, new_outer_loop->InputAt(0));
+
+ // Check structure of outer loop.
+ Node* new_outer_branch = FindSuccessor(new_outer_loop, IrOpcode::kBranch);
+ CHECK_NE(new_outer_branch, outer.branch);
+ CheckInputs(new_outer_branch, new_outer_phi, new_outer_loop);
+ Node* new_outer_exit = FindSuccessor(new_outer_branch, IrOpcode::kIfFalse);
+ Node* new_outer_if_true = FindSuccessor(new_outer_branch, IrOpcode::kIfTrue);
+
+ // Check structure of return.
+ end = T.graph.end();
+ Node* new_ret = end->InputAt(0);
+ CHECK_EQ(IrOpcode::kReturn, new_ret->opcode());
+ CheckInputs(new_ret, new_outer_phi, T.start, new_outer_exit);
+
+ // Check structure of inner loop.
+ Node* new_inner_loop = FindSuccessor(new_outer_if_true, IrOpcode::kLoop);
+ Node* new_inner_phi = FindSuccessor(new_inner_loop, IrOpcode::kPhi);
+
+ CheckInputs(new_inner_phi, T.jsgraph.OneConstant(), T.jsgraph.ZeroConstant(),
+ new_inner_loop);
+ CheckInputs(new_outer_phi, new_entry_phi, new_inner_phi,
+ T.jsgraph.ZeroConstant(), new_outer_loop);
+}
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 2dc30294a2..55f054a74b 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -6,6 +6,7 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -25,7 +26,8 @@ class RepresentationChangerTester : public HandleAndZoneScope,
explicit RepresentationChangerTester(int num_parameters = 0)
: GraphAndBuilders(main_zone()),
javascript_(main_zone()),
- jsgraph_(main_graph_, &main_common_, &javascript_, &main_machine_),
+ jsgraph_(main_isolate(), main_graph_, &main_common_, &javascript_,
+ &main_machine_),
changer_(&jsgraph_, &main_simplified_, main_isolate()) {
Node* s = graph()->NewNode(common()->Start(num_parameters));
graph()->SetStart(s);
@@ -57,7 +59,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
void CheckFloat64Constant(Node* n, double expected) {
Float64Matcher m(n);
CHECK(m.HasValue());
- CHECK_EQ(expected, m.Value());
+ CheckDoubleEq(expected, m.Value());
}
void CheckFloat32Constant(Node* n, float expected) {
@@ -76,7 +78,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
NumberMatcher m(n);
CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
CHECK(m.HasValue());
- CHECK_EQ(expected, m.Value());
+ CheckDoubleEq(expected, m.Value());
}
Node* Parameter(int index = 0) {
@@ -480,12 +482,6 @@ TEST(Nops) {
r.CheckNop(kRepFloat32 | kTypeNumber, kRepFloat32);
r.CheckNop(kRepFloat32, kRepFloat32 | kTypeNumber);
- // 32-bit or 64-bit words can be used as branch conditions (kRepBit).
- r.CheckNop(kRepWord32, kRepBit);
- r.CheckNop(kRepWord32, kRepBit | kTypeBool);
- r.CheckNop(kRepWord64, kRepBit);
- r.CheckNop(kRepWord64, kRepBit | kTypeBool);
-
// 32-bit words can be used as smaller word sizes and vice versa, because
// loads from memory implicitly sign or zero extend the value to the
// full machine word size, and stores implicitly truncate.
@@ -510,6 +506,16 @@ TEST(Nops) {
TEST(TypeErrors) {
RepresentationChangerTester r;
+ // Wordish cannot be implicitly converted to/from comparison conditions.
+ r.CheckTypeError(kRepWord8, kRepBit);
+ r.CheckTypeError(kRepWord8, kRepBit | kTypeBool);
+ r.CheckTypeError(kRepWord16, kRepBit);
+ r.CheckTypeError(kRepWord16, kRepBit | kTypeBool);
+ r.CheckTypeError(kRepWord32, kRepBit);
+ r.CheckTypeError(kRepWord32, kRepBit | kTypeBool);
+ r.CheckTypeError(kRepWord64, kRepBit);
+ r.CheckTypeError(kRepWord64, kRepBit | kTypeBool);
+
// Floats cannot be implicitly converted to/from comparison conditions.
r.CheckTypeError(kRepFloat64, kRepBit);
r.CheckTypeError(kRepFloat64, kRepBit | kTypeBool);
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 76cbb8fc58..bd4038ed91 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -11,7 +11,6 @@ using namespace v8::internal::compiler;
uint32_t flags = CompilationInfo::kInliningEnabled;
TEST(IsSmi) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_IsSmi(a); })", flags);
@@ -25,7 +24,6 @@ TEST(IsSmi) {
TEST(IsNonNegativeSmi) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_IsNonNegativeSmi(a); })", flags);
@@ -39,7 +37,6 @@ TEST(IsNonNegativeSmi) {
TEST(IsMinusZero) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_IsMinusZero(a); })", flags);
@@ -53,7 +50,6 @@ TEST(IsMinusZero) {
TEST(IsArray) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_IsArray(a); })", flags);
@@ -69,7 +65,6 @@ TEST(IsArray) {
TEST(IsObject) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_IsObject(a); })", flags);
@@ -85,7 +80,6 @@ TEST(IsObject) {
TEST(IsFunction) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_IsFunction(a); })", flags);
@@ -101,7 +95,6 @@ TEST(IsFunction) {
TEST(IsRegExp) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_IsRegExp(a); })", flags);
@@ -117,7 +110,6 @@ TEST(IsRegExp) {
TEST(ClassOf) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_ClassOf(a); })", flags);
@@ -133,7 +125,6 @@ TEST(ClassOf) {
TEST(ObjectEquals) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_ObjectEquals(a,b); })", flags);
CompileRun("var o = {}");
@@ -148,7 +139,6 @@ TEST(ObjectEquals) {
TEST(ValueOf) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_ValueOf(a); })", flags);
@@ -160,7 +150,6 @@ TEST(ValueOf) {
TEST(SetValueOf) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_SetValueOf(a,b); })", flags);
@@ -171,7 +160,6 @@ TEST(SetValueOf) {
TEST(StringCharFromCode) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a) { return %_StringCharFromCode(a); })", flags);
@@ -182,7 +170,6 @@ TEST(StringCharFromCode) {
TEST(StringCharAt) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_StringCharAt(a,b); })", flags);
@@ -193,7 +180,6 @@ TEST(StringCharAt) {
TEST(StringCharCodeAt) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_StringCharCodeAt(a,b); })",
flags);
@@ -205,7 +191,6 @@ TEST(StringCharCodeAt) {
TEST(StringAdd) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })", flags);
@@ -216,7 +201,6 @@ TEST(StringAdd) {
TEST(StringSubString) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_SubString(a,b,b+3); })", flags);
@@ -227,7 +211,6 @@ TEST(StringSubString) {
TEST(StringCompare) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })", flags);
@@ -238,7 +221,6 @@ TEST(StringCompare) {
TEST(CallFunction) {
- FLAG_turbo_inlining_intrinsics = true;
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_CallFunction(a, 1, 2, 3, b); })",
flags);
diff --git a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
index 7a4a0b335b..5a7bdb9b4a 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
@@ -168,6 +168,33 @@ TEST(ForInContinueStatement) {
}
+TEST(ForOfContinueStatement) {
+ const char* src =
+ "(function(a,b) {"
+ " var r = '-';"
+ " for (var x of a) {"
+ " r += x + '-';"
+ " if (b) continue;"
+ " r += 'X-';"
+ " }"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ CompileRun(
+ "function wrap(v) {"
+ " var iterable = {};"
+ " function next() { return { done:!v.length, value:v.shift() }; };"
+ " iterable[Symbol.iterator] = function() { return { next:next }; };"
+ " return iterable;"
+ "}");
+
+ T.CheckCall(T.Val("-"), T.NewObject("wrap([])"), T.true_value());
+ T.CheckCall(T.Val("-1-2-"), T.NewObject("wrap([1,2])"), T.true_value());
+ T.CheckCall(T.Val("-1-X-2-X-"), T.NewObject("wrap([1,2])"), T.false_value());
+}
+
+
TEST(SwitchStatement) {
const char* src =
"(function(a,b) {"
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index dec7194c4a..43ab38b8c2 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -184,26 +184,6 @@ TEST(RuntimeCallInline) {
}
-TEST(RuntimeCallBooleanize) {
- // TODO(turbofan): %Booleanize will disappear, don't hesitate to remove this
- // test case, two-argument case is covered by the above test already.
- FLAG_allow_natives_syntax = true;
- FunctionTester T("(function(a,b) { return %Booleanize(a, b); })");
-
- T.CheckCall(T.true_value(), T.Val(-1), T.Val(Token::LT));
- T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::EQ));
- T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::GT));
-
- T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::LT));
- T.CheckCall(T.true_value(), T.Val(0.0), T.Val(Token::EQ));
- T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::GT));
-
- T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::LT));
- T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::EQ));
- T.CheckCall(T.true_value(), T.Val(1), T.Val(Token::GT));
-}
-
-
TEST(EvalCall) {
FunctionTester T("(function(a,b) { return eval(a); })");
Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
diff --git a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
index 0712ab6205..74990daac9 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
@@ -10,6 +10,7 @@ using namespace v8::internal;
using namespace v8::internal::compiler;
TEST(Throw) {
+ i::FLAG_turbo_exceptions = true;
FunctionTester T("(function(a,b) { if (a) { throw b; } else { return b; }})");
T.CheckThrows(T.true_value(), T.NewObject("new Error"));
@@ -43,3 +44,117 @@ TEST(ThrowSourcePosition) {
CHECK_EQ(4, message->GetLineNumber());
CHECK_EQ(95, message->GetStartPosition());
}
+
+
+// TODO(mstarzinger): Increase test coverage by having similar tests within the
+// mjsunit suite to also test integration with other components (e.g. OSR).
+
+
+TEST(Catch) {
+ i::FLAG_turbo_exceptions = true;
+ const char* src =
+ "(function(a,b) {"
+ " var r = '-';"
+ " try {"
+ " r += 'A-';"
+ " throw 'B-';"
+ " } catch (e) {"
+ " r += e;"
+ " }"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val("-A-B-"));
+}
+
+
+TEST(CatchNested) {
+ i::FLAG_turbo_exceptions = true;
+ const char* src =
+ "(function(a,b) {"
+ " var r = '-';"
+ " try {"
+ " r += 'A-';"
+ " throw 'C-';"
+ " } catch (e) {"
+ " try {"
+ " throw 'B-';"
+ " } catch (e) {"
+ " r += e;"
+ " }"
+ " r += e;"
+ " }"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val("-A-B-C-"));
+}
+
+
+TEST(CatchBreak) {
+ i::FLAG_turbo_exceptions = true;
+ const char* src =
+ "(function(a,b) {"
+ " var r = '-';"
+ " L: try {"
+ " r += 'A-';"
+ " if (a) break L;"
+ " r += 'B-';"
+ " throw 'C-';"
+ " } catch (e) {"
+ " if (b) break L;"
+ " r += e;"
+ " }"
+ " r += 'D-';"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val("-A-D-"), T.true_value(), T.false_value());
+ T.CheckCall(T.Val("-A-B-D-"), T.false_value(), T.true_value());
+ T.CheckCall(T.Val("-A-B-C-D-"), T.false_value(), T.false_value());
+}
+
+
+TEST(Finally) {
+ i::FLAG_turbo_exceptions = true;
+ const char* src =
+ "(function(a,b) {"
+ " var r = '-';"
+ " try {"
+ " r += 'A-';"
+ " } finally {"
+ " r += 'B-';"
+ " }"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val("-A-B-"));
+}
+
+
+TEST(FinallyBreak) {
+ i::FLAG_turbo_exceptions = true;
+ const char* src =
+ "(function(a,b) {"
+ " var r = '-';"
+ " L: try {"
+ " r += 'A-';"
+ " if (a) return r;"
+ " r += 'B-';"
+ " if (b) break L;"
+ " r += 'C-';"
+ " } finally {"
+ " r += 'D-';"
+ " }"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val("-A-"), T.true_value(), T.false_value());
+ T.CheckCall(T.Val("-A-B-D-"), T.false_value(), T.true_value());
+ T.CheckCall(T.Val("-A-B-C-D-"), T.false_value(), T.false_value());
+}
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index eb39760ff7..bb7c239a59 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -212,7 +212,7 @@ TEST(BinopLessThan) {
}
-TEST(BinopLessThanEqual) {
+TEST(BinopLessThanOrEqual) {
FunctionTester T("(function(a,b) { return a <= b; })");
T.CheckTrue(7, 8);
@@ -522,3 +522,24 @@ TEST(RegExpLiteral) {
T.CheckTrue(T.Val("abc"));
T.CheckFalse(T.Val("xyz"));
}
+
+
+TEST(ClassLiteral) {
+ FLAG_harmony_classes = true;
+ FLAG_harmony_sloppy = true;
+ FLAG_harmony_object_literals = true;
+ const char* src =
+ "(function(a,b) {"
+ " class C {"
+ " x() { return a; }"
+ " static y() { return b; }"
+ " get z() { return 0; }"
+ " constructor() {}"
+ " }"
+ " return new C().x() + C.y();"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+ T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 974d4cef54..5a55ce6e23 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -15,10 +15,6 @@
#if V8_TURBOFAN_TARGET
using namespace v8::base;
-
-#define CHECK_UINT32_EQ(x, y) \
- CHECK_EQ(static_cast<int32_t>(x), static_cast<int32_t>(y))
-
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -451,6 +447,115 @@ TEST(RunLoopIncrementFloat64) {
}
+TEST(RunSwitch1) {
+ RawMachineAssemblerTester<int32_t> m;
+
+ int constant = 11223344;
+
+ MLabel block0, block1, def, end;
+ MLabel* case_labels[] = {&block0, &block1};
+ int32_t case_values[] = {0, 1};
+ m.Switch(m.Int32Constant(0), &def, case_values, case_labels,
+ arraysize(case_labels));
+ m.Bind(&block0);
+ m.Goto(&end);
+ m.Bind(&block1);
+ m.Goto(&end);
+ m.Bind(&def);
+ m.Goto(&end);
+ m.Bind(&end);
+ m.Return(m.Int32Constant(constant));
+
+ CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunSwitch2) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+
+ MLabel blocka, blockb, blockc;
+ MLabel* case_labels[] = {&blocka, &blockb};
+ int32_t case_values[] = {std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max()};
+ m.Switch(m.Parameter(0), &blockc, case_values, case_labels,
+ arraysize(case_labels));
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(-1));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&blockc);
+ m.Return(m.Int32Constant(0));
+
+ CHECK_EQ(1, m.Call(std::numeric_limits<int32_t>::max()));
+ CHECK_EQ(-1, m.Call(std::numeric_limits<int32_t>::min()));
+ for (int i = -100; i < 100; i += 25) {
+ CHECK_EQ(0, m.Call(i));
+ }
+}
+
+
+TEST(RunSwitch3) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+
+ MLabel blocka, blockb, blockc;
+ MLabel* case_labels[] = {&blocka, &blockb};
+ int32_t case_values[] = {std::numeric_limits<int32_t>::min() + 0,
+ std::numeric_limits<int32_t>::min() + 1};
+ m.Switch(m.Parameter(0), &blockc, case_values, case_labels,
+ arraysize(case_labels));
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&blockc);
+ m.Return(m.Int32Constant(2));
+
+ CHECK_EQ(0, m.Call(std::numeric_limits<int32_t>::min() + 0));
+ CHECK_EQ(1, m.Call(std::numeric_limits<int32_t>::min() + 1));
+ for (int i = -100; i < 100; i += 25) {
+ CHECK_EQ(2, m.Call(i));
+ }
+}
+
+
+TEST(RunSwitch4) {
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+
+ const size_t kNumCases = 512;
+ const size_t kNumValues = kNumCases + 1;
+ int32_t values[kNumValues];
+ m.main_isolate()->random_number_generator()->NextBytes(values,
+ sizeof(values));
+ MLabel end, def;
+ int32_t case_values[kNumCases];
+ MLabel* case_labels[kNumCases];
+ Node* results[kNumValues];
+ for (size_t i = 0; i < kNumCases; ++i) {
+ case_values[i] = static_cast<int32_t>(i);
+ case_labels[i] = new (m.main_zone()->New(sizeof(MLabel))) MLabel;
+ }
+ m.Switch(m.Parameter(0), &def, case_values, case_labels,
+ arraysize(case_labels));
+ for (size_t i = 0; i < kNumCases; ++i) {
+ m.Bind(case_labels[i]);
+ results[i] = m.Int32Constant(values[i]);
+ m.Goto(&end);
+ }
+ m.Bind(&def);
+ results[kNumCases] = m.Int32Constant(values[kNumCases]);
+ m.Goto(&end);
+ m.Bind(&end);
+ const int num_results = static_cast<int>(arraysize(results));
+ Node* phi =
+ m.NewNode(m.common()->Phi(kMachInt32, num_results), num_results, results);
+ m.Return(phi);
+
+ for (size_t i = 0; i < kNumValues; ++i) {
+ CHECK_EQ(values[i], m.Call(static_cast<int>(i)));
+ }
+}
+
+
TEST(RunLoadInt32) {
RawMachineAssemblerTester<int32_t> m;
@@ -505,7 +610,7 @@ TEST(RunLoadStoreFloat64Offset) {
p1 = *j;
p2 = *j - 5;
CHECK_EQ(magic, m.Call());
- CHECK_EQ(p1, p2);
+ CheckDoubleEq(p1, p2);
}
}
}
@@ -763,7 +868,7 @@ TEST(RunInt32AddInBranch) {
static const int32_t constant = 987654321;
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -781,7 +886,7 @@ TEST(RunInt32AddInBranch) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -810,7 +915,7 @@ TEST(RunInt32AddInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -827,7 +932,7 @@ TEST(RunInt32AddInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -885,7 +990,7 @@ TEST(RunInt32AddInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i + *j) == 0;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -897,7 +1002,7 @@ TEST(RunInt32AddInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i + *j) == 0;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -908,7 +1013,7 @@ TEST(RunInt32AddInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i + *j) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -919,7 +1024,7 @@ TEST(RunInt32AddInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*j + *i) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -971,7 +1076,7 @@ TEST(RunInt32SubP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = static_cast<int32_t>(*i - *j);
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -984,7 +1089,7 @@ TEST(RunInt32SubImm) {
m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i - *j;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -994,7 +1099,7 @@ TEST(RunInt32SubImm) {
m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j - *i;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1072,8 +1177,8 @@ TEST(RunInt32SubAndWord32ShrP) {
FOR_UINT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = *i - (*j >> shift);
- CHECK_UINT32_EQ(expected, m.Call(*i, *j, shift));
+ uint32_t expected = *i - (*j >> shift);
+ CHECK_EQ(expected, m.Call(*i, *j, shift));
}
}
}
@@ -1087,7 +1192,7 @@ TEST(RunInt32SubAndWord32ShrP) {
FOR_UINT32_SHIFTS(shift) {
FOR_UINT32_INPUTS(k) {
// Use uint32_t because signed overflow is UB in C.
- int32_t expected = (*i >> shift) - *k;
+ uint32_t expected = (*i >> shift) - *k;
CHECK_EQ(expected, m.Call(*i, shift, *k));
}
}
@@ -1100,7 +1205,7 @@ TEST(RunInt32SubInBranch) {
static const int constant = 987654321;
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1118,7 +1223,7 @@ TEST(RunInt32SubInBranch) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1146,7 +1251,7 @@ TEST(RunInt32SubInBranch) {
m.Bind(&blockb);
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
- int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
+ uint32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
CHECK_EQ(expected, m.Call(*j));
}
}
@@ -1222,7 +1327,7 @@ TEST(RunInt32SubInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i - *j) == 0;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1234,7 +1339,7 @@ TEST(RunInt32SubInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i - *j) == 0;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1245,7 +1350,7 @@ TEST(RunInt32SubInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i - *j) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1256,7 +1361,7 @@ TEST(RunInt32SubInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*j - *i) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1318,7 +1423,7 @@ TEST(RunInt32MulP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i * *j;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1346,7 +1451,7 @@ TEST(RunInt32MulImm) {
m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i * *j;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1356,7 +1461,7 @@ TEST(RunInt32MulImm) {
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j * *i;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1527,7 +1632,7 @@ TEST(RunUint32DivP) {
uint32_t p0 = *i;
uint32_t p1 = *j;
if (p1 != 0) {
- uint32_t expected = static_cast<uint32_t>(p0 / p1);
+ int32_t expected = bit_cast<int32_t>(p0 / p1);
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -1542,7 +1647,7 @@ TEST(RunUint32DivP) {
uint32_t p0 = *i;
uint32_t p1 = *j;
if (p1 != 0) {
- uint32_t expected = static_cast<uint32_t>(p0 + (p0 / p1));
+ int32_t expected = bit_cast<int32_t>(p0 + (p0 / p1));
CHECK_EQ(expected, bt.call(p0, p1));
}
}
@@ -1588,7 +1693,7 @@ TEST(RunInt32ModP) {
TEST(RunUint32ModP) {
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Uint32Mod(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
@@ -1603,7 +1708,7 @@ TEST(RunUint32ModP) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Int32BinopTester bt(&m);
+ Uint32BinopTester bt(&m);
bt.AddReturn(m.Int32Add(bt.param0, m.Uint32Mod(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
@@ -1626,7 +1731,7 @@ TEST(RunWord32AndP) {
bt.AddReturn(m.Word32And(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i & *j;
+ int32_t expected = *i & *j;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -1637,7 +1742,7 @@ TEST(RunWord32AndP) {
bt.AddReturn(m.Word32And(bt.param0, m.Word32Not(bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = *i & ~(*j);
+ int32_t expected = *i & ~(*j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -1648,7 +1753,7 @@ TEST(RunWord32AndP) {
bt.AddReturn(m.Word32And(m.Word32Not(bt.param0), bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- uint32_t expected = ~(*i) & *j;
+ int32_t expected = ~(*i) & *j;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -1665,7 +1770,7 @@ TEST(RunWord32AndAndWord32ShlP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i << (*j & 0x1f);
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1677,7 +1782,7 @@ TEST(RunWord32AndAndWord32ShlP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i << (0x1f & *j);
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1693,7 +1798,7 @@ TEST(RunWord32AndAndWord32ShrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i >> (*j & 0x1f);
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1705,7 +1810,7 @@ TEST(RunWord32AndAndWord32ShrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i >> (0x1f & *j);
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1732,7 +1837,7 @@ TEST(RunWord32AndAndWord32SarP) {
m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- uint32_t expected = *i >> (0x1f & *j);
+ int32_t expected = *i >> (0x1f & *j);
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -1747,7 +1852,7 @@ TEST(RunWord32AndImm) {
m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i & *j;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1757,7 +1862,7 @@ TEST(RunWord32AndImm) {
m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i & ~(*j);
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1768,7 +1873,7 @@ TEST(RunWord32AndInBranch) {
static const int constant = 987654321;
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1786,7 +1891,7 @@ TEST(RunWord32AndInBranch) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
MLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
@@ -1891,7 +1996,7 @@ TEST(RunWord32AndInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i & *j) == 0;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1903,7 +2008,7 @@ TEST(RunWord32AndInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i & *j) == 0;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1914,7 +2019,7 @@ TEST(RunWord32AndInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i & *j) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1925,7 +2030,7 @@ TEST(RunWord32AndInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*j & *i) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1940,7 +2045,7 @@ TEST(RunWord32OrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | *j;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1951,7 +2056,7 @@ TEST(RunWord32OrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | ~(*j);
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1962,7 +2067,7 @@ TEST(RunWord32OrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = ~(*i) | *j;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -1976,7 +2081,7 @@ TEST(RunWord32OrImm) {
m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | *j;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -1986,7 +2091,7 @@ TEST(RunWord32OrImm) {
m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | ~(*j);
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2113,7 +2218,7 @@ TEST(RunWord32OrInBranch) {
TEST(RunWord32OrInComparison) {
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
@@ -2125,7 +2230,7 @@ TEST(RunWord32OrInComparison) {
}
{
RawMachineAssemblerTester<int32_t> m;
- Uint32BinopTester bt(&m);
+ Int32BinopTester bt(&m);
bt.AddReturn(
m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1)));
FOR_UINT32_INPUTS(i) {
@@ -2142,7 +2247,7 @@ TEST(RunWord32OrInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i | *j) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2153,7 +2258,7 @@ TEST(RunWord32OrInComparison) {
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*j | *i) == 0;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2163,11 +2268,11 @@ TEST(RunWord32OrInComparison) {
TEST(RunWord32XorP) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i ^ *j;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2177,8 +2282,8 @@ TEST(RunWord32XorP) {
bt.AddReturn(m.Word32Xor(bt.param0, bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
- int32_t expected = *i ^ *j;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ uint32_t expected = *i ^ *j;
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -2210,7 +2315,7 @@ TEST(RunWord32XorP) {
m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i ^ ~(*j);
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2233,7 +2338,7 @@ TEST(RunWord32XorInBranch) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -2251,7 +2356,7 @@ TEST(RunWord32XorInBranch) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
- CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+ CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
@@ -2268,7 +2373,7 @@ TEST(RunWord32XorInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2286,7 +2391,7 @@ TEST(RunWord32XorInBranch) {
m.Return(m.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(j) {
uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2342,7 +2447,7 @@ TEST(RunWord32ShlP) {
m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j << shift;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2353,7 +2458,7 @@ TEST(RunWord32ShlP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = *i << shift;
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
@@ -2369,7 +2474,7 @@ TEST(RunWord32ShlInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = 0 == (*i << shift);
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
@@ -2381,31 +2486,31 @@ TEST(RunWord32ShlInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = 0 == (*i << shift);
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(
m.Word32Equal(m.Int32Constant(0),
m.Word32Shl(m.Parameter(0), m.Int32Constant(shift))));
FOR_UINT32_INPUTS(i) {
uint32_t expected = 0 == (*i << shift);
- CHECK_UINT32_EQ(expected, m.Call(*i));
+ CHECK_EQ(expected, m.Call(*i));
}
}
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(
m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
uint32_t expected = 0 == (*i << shift);
- CHECK_UINT32_EQ(expected, m.Call(*i));
+ CHECK_EQ(expected, m.Call(*i));
}
}
}
@@ -2419,7 +2524,7 @@ TEST(RunWord32ShrP) {
m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j >> shift;
- CHECK_UINT32_EQ(expected, m.Call(*j));
+ CHECK_EQ(expected, m.Call(*j));
}
}
}
@@ -2430,10 +2535,10 @@ TEST(RunWord32ShrP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = *i >> shift;
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
- CHECK_EQ(0x00010000, bt.call(0x80000000, 15));
+ CHECK_EQ(0x00010000u, bt.call(0x80000000, 15));
}
}
@@ -2447,7 +2552,7 @@ TEST(RunWord32ShrInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = 0 == (*i >> shift);
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
@@ -2459,31 +2564,31 @@ TEST(RunWord32ShrInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = 0 == (*i >> shift);
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(
m.Word32Equal(m.Int32Constant(0),
m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
FOR_UINT32_INPUTS(i) {
uint32_t expected = 0 == (*i >> shift);
- CHECK_UINT32_EQ(expected, m.Call(*i));
+ CHECK_EQ(expected, m.Call(*i));
}
}
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(
m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
uint32_t expected = 0 == (*i >> shift);
- CHECK_UINT32_EQ(expected, m.Call(*i));
+ CHECK_EQ(expected, m.Call(*i));
}
}
}
@@ -2511,7 +2616,7 @@ TEST(RunWord32SarP) {
CHECK_EQ(expected, bt.call(*i, shift));
}
}
- CHECK_EQ(0xFFFF0000, bt.call(0x80000000, 15));
+ CHECK_EQ(bit_cast<int32_t>(0xFFFF0000), bt.call(0x80000000, 15));
}
}
@@ -2560,7 +2665,7 @@ TEST(RunWord32SarInComparison) {
m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_INT32_INPUTS(i) {
- uint32_t expected = 0 == (*i >> shift);
+ int32_t expected = 0 == (*i >> shift);
CHECK_EQ(expected, m.Call(*i));
}
}
@@ -2586,7 +2691,7 @@ TEST(RunWord32RorP) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = bits::RotateRight32(*i, shift);
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
@@ -2602,7 +2707,7 @@ TEST(RunWord32RorInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
@@ -2614,31 +2719,31 @@ TEST(RunWord32RorInComparison) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_SHIFTS(shift) {
uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+ CHECK_EQ(expected, bt.call(*i, shift));
}
}
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(
m.Word32Equal(m.Int32Constant(0),
m.Word32Ror(m.Parameter(0), m.Int32Constant(shift))));
FOR_UINT32_INPUTS(i) {
uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_UINT32_EQ(expected, m.Call(*i));
+ CHECK_EQ(expected, m.Call(*i));
}
}
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
m.Return(
m.Word32Equal(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_UINT32_EQ(expected, m.Call(*i));
+ CHECK_EQ(expected, m.Call(*i));
}
}
}
@@ -2964,7 +3069,7 @@ TEST(RunFloat64AddP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl + *pr;
- CHECK_EQ(expected, bt.call(*pl, *pr));
+ CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
@@ -2979,7 +3084,7 @@ TEST(RunFloat64SubP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl - *pr;
- CHECK_EQ(expected, bt.call(*pl, *pr));
+ CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
@@ -2999,7 +3104,7 @@ TEST(RunFloat64SubImm1) {
input = *j;
double expected = *i - input;
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, output);
+ CheckDoubleEq(expected, output);
}
}
}
@@ -3019,7 +3124,7 @@ TEST(RunFloat64SubImm2) {
input = *j;
double expected = input - *i;
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, output);
+ CheckDoubleEq(expected, output);
}
}
}
@@ -3034,7 +3139,7 @@ TEST(RunFloat64MulP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl * *pr;
- CHECK_EQ(expected, bt.call(*pl, *pr));
+ CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
@@ -3063,7 +3168,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
volatile double temp = input_a * input_b;
volatile double expected = temp + input_c;
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, output);
+ CheckDoubleEq(expected, output);
}
}
}
@@ -3085,7 +3190,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
volatile double temp = input_b * input_c;
volatile double expected = input_a + temp;
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, output);
+ CheckDoubleEq(expected, output);
}
}
}
@@ -3115,7 +3220,7 @@ TEST(RunFloat64MulAndFloat64SubP) {
volatile double temp = input_b * input_c;
volatile double expected = input_a - temp;
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, output);
+ CheckDoubleEq(expected, output);
}
}
}
@@ -3137,7 +3242,7 @@ TEST(RunFloat64MulImm) {
input = *j;
double expected = *i * input;
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, output);
+ CheckDoubleEq(expected, output);
}
}
}
@@ -3152,7 +3257,7 @@ TEST(RunFloat64MulImm) {
input = *j;
double expected = input * *i;
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, output);
+ CheckDoubleEq(expected, output);
}
}
}
@@ -3168,7 +3273,7 @@ TEST(RunFloat64DivP) {
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
double expected = *pl / *pr;
- CHECK_EQ(expected, bt.call(*pl, *pr));
+ CheckDoubleEq(expected, bt.call(*pl, *pr));
}
}
}
@@ -3184,7 +3289,7 @@ TEST(RunFloat64ModP) {
FOR_FLOAT64_INPUTS(j) {
double expected = modulo(*i, *j);
double found = bt.call(*i, *j);
- CHECK_EQ(expected, found);
+ CheckDoubleEq(expected, found);
}
}
}
@@ -3223,7 +3328,7 @@ TEST(RunChangeInt32ToFloat64_B) {
TEST(RunChangeUint32ToFloat64_B) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(kMachUint32);
double output = 0;
Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0));
@@ -3404,7 +3509,7 @@ TEST(RunChangeFloat64ToInt32_spilled) {
TEST(RunChangeFloat64ToUint32_spilled) {
RawMachineAssemblerTester<uint32_t> m;
const int kNumInputs = 32;
- int32_t magic = 0x786234;
+ uint32_t magic = 0x786234;
double input[kNumInputs];
uint32_t result[kNumInputs];
Node* input_node[kNumInputs];
@@ -3433,9 +3538,9 @@ TEST(RunChangeFloat64ToUint32_spilled) {
for (int i = 0; i < kNumInputs; i++) {
if (i % 2) {
- CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i + 2147483648u));
+ CHECK_EQ(result[i], static_cast<uint32_t>(100 + i + 2147483648u));
} else {
- CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i));
+ CHECK_EQ(result[i], static_cast<uint32_t>(100 + i));
}
}
}
@@ -3444,7 +3549,7 @@ TEST(RunChangeFloat64ToUint32_spilled) {
TEST(RunTruncateFloat64ToFloat32_spilled) {
RawMachineAssemblerTester<uint32_t> m;
const int kNumInputs = 32;
- int32_t magic = 0x786234;
+ uint32_t magic = 0x786234;
double input[kNumInputs];
float result[kNumInputs];
Node* input_node[kNumInputs];
@@ -3890,7 +3995,7 @@ TEST(RunFloat64UnorderedCompare) {
m.machine()->Float64LessThan(),
m.machine()->Float64LessThanOrEqual()};
- double nan = v8::base::OS::nan_value();
+ double nan = std::numeric_limits<double>::quiet_NaN();
FOR_FLOAT64_INPUTS(i) {
for (size_t o = 0; o < arraysize(operators); ++o) {
@@ -4368,7 +4473,7 @@ TEST(RunTruncateInt64ToInt32P) {
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
expected = (static_cast<uint64_t>(*j) << 32) | *i;
- CHECK_UINT32_EQ(expected, m.Call());
+ CHECK_EQ(static_cast<int32_t>(expected), m.Call());
}
}
}
@@ -4385,9 +4490,9 @@ TEST(RunTruncateFloat64ToInt32P) {
{-1.5, -1},
{5.5, 5},
{-5.0, -5},
- {v8::base::OS::nan_value(), 0},
+ {std::numeric_limits<double>::quiet_NaN(), 0},
{std::numeric_limits<double>::infinity(), 0},
- {-v8::base::OS::nan_value(), 0},
+ {-std::numeric_limits<double>::quiet_NaN(), 0},
{-std::numeric_limits<double>::infinity(), 0},
{4.94065645841e-324, 0},
{-4.94065645841e-324, 0},
@@ -4504,7 +4609,7 @@ TEST(RunTruncateFloat64ToFloat32) {
input = *i;
volatile double expected = DoubleToFloat32(input);
CHECK_EQ(0, m.Call());
- CHECK_EQ(expected, actual);
+ CheckDoubleEq(expected, actual);
}
}
diff --git a/deps/v8/test/cctest/compiler/test-schedule.cc b/deps/v8/test/cctest/compiler/test-schedule.cc
deleted file mode 100644
index 1eb3547187..0000000000
--- a/deps/v8/test/cctest/compiler/test-schedule.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
-#include "src/compiler/operator.h"
-#include "src/compiler/schedule.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-static Operator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
- "dummy", 0, 0, 0, 0, 0, 0);
-
-TEST(TestScheduleAllocation) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- CHECK_NE(NULL, schedule.start());
- CHECK_EQ(schedule.start(), schedule.GetBlockById(BasicBlock::Id::FromInt(0)));
-}
-
-
-TEST(TestScheduleAddNode) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- Graph graph(scope.main_zone());
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
-
- BasicBlock* entry = schedule.start();
- schedule.AddNode(entry, n0);
- schedule.AddNode(entry, n1);
-
- CHECK_EQ(entry, schedule.block(n0));
- CHECK_EQ(entry, schedule.block(n1));
- CHECK(schedule.SameBasicBlock(n0, n1));
-
- Node* n2 = graph.NewNode(&dummy_operator);
- CHECK_EQ(NULL, schedule.block(n2));
-}
-
-
-TEST(TestScheduleAddGoto) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlock* entry = schedule.start();
- BasicBlock* next = schedule.NewBasicBlock();
-
- schedule.AddGoto(entry, next);
-
- CHECK_EQ(0, static_cast<int>(entry->PredecessorCount()));
- CHECK_EQ(1, static_cast<int>(entry->SuccessorCount()));
- CHECK_EQ(next, entry->SuccessorAt(0));
-
- CHECK_EQ(1, static_cast<int>(next->PredecessorCount()));
- CHECK_EQ(entry, next->PredecessorAt(0));
- CHECK_EQ(0, static_cast<int>(next->SuccessorCount()));
-}
-
-
-TEST(TestScheduleAddBranch) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* b = graph.NewNode(common.Branch(), n0);
-
- BasicBlock* entry = schedule.start();
- BasicBlock* tblock = schedule.NewBasicBlock();
- BasicBlock* fblock = schedule.NewBasicBlock();
-
- schedule.AddBranch(entry, b, tblock, fblock);
-
- CHECK_EQ(0, static_cast<int>(entry->PredecessorCount()));
- CHECK_EQ(2, static_cast<int>(entry->SuccessorCount()));
- CHECK_EQ(tblock, entry->SuccessorAt(0));
- CHECK_EQ(fblock, entry->SuccessorAt(1));
-
- CHECK_EQ(1, static_cast<int>(tblock->PredecessorCount()));
- CHECK_EQ(entry, tblock->PredecessorAt(0));
- CHECK_EQ(0, static_cast<int>(tblock->SuccessorCount()));
-
- CHECK_EQ(1, static_cast<int>(fblock->PredecessorCount()));
- CHECK_EQ(entry, fblock->PredecessorAt(0));
- CHECK_EQ(0, static_cast<int>(fblock->SuccessorCount()));
-}
-
-
-TEST(TestScheduleAddReturn) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- Graph graph(scope.main_zone());
- Node* n0 = graph.NewNode(&dummy_operator);
- BasicBlock* entry = schedule.start();
- schedule.AddReturn(entry, n0);
-
- CHECK_EQ(0, static_cast<int>(entry->PredecessorCount()));
- CHECK_EQ(1, static_cast<int>(entry->SuccessorCount()));
- CHECK_EQ(schedule.end(), entry->SuccessorAt(0));
-}
-
-
-TEST(TestScheduleAddThrow) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- Graph graph(scope.main_zone());
- Node* n0 = graph.NewNode(&dummy_operator);
- BasicBlock* entry = schedule.start();
- schedule.AddThrow(entry, n0);
-
- CHECK_EQ(0, static_cast<int>(entry->PredecessorCount()));
- CHECK_EQ(1, static_cast<int>(entry->SuccessorCount()));
- CHECK_EQ(schedule.end(), entry->SuccessorAt(0));
-}
-
-
-TEST(TestScheduleInsertBranch) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* b = graph.NewNode(common.Branch(), n1);
-
- BasicBlock* entry = schedule.start();
- BasicBlock* tblock = schedule.NewBasicBlock();
- BasicBlock* fblock = schedule.NewBasicBlock();
- BasicBlock* merge = schedule.NewBasicBlock();
- schedule.AddReturn(entry, n0);
- schedule.AddGoto(tblock, merge);
- schedule.AddGoto(fblock, merge);
-
- schedule.InsertBranch(entry, merge, b, tblock, fblock);
-
- CHECK_EQ(0, static_cast<int>(entry->PredecessorCount()));
- CHECK_EQ(2, static_cast<int>(entry->SuccessorCount()));
- CHECK_EQ(tblock, entry->SuccessorAt(0));
- CHECK_EQ(fblock, entry->SuccessorAt(1));
-
- CHECK_EQ(2, static_cast<int>(merge->PredecessorCount()));
- CHECK_EQ(1, static_cast<int>(merge->SuccessorCount()));
- CHECK_EQ(schedule.end(), merge->SuccessorAt(0));
-
- CHECK_EQ(1, static_cast<int>(schedule.end()->PredecessorCount()));
- CHECK_EQ(0, static_cast<int>(schedule.end()->SuccessorCount()));
- CHECK_EQ(merge, schedule.end()->PredecessorAt(0));
-}
-
-
-TEST(BuildMulNodeGraph) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
- // TODO(titzer): use test operators.
- MachineOperatorBuilder machine(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(0));
- graph.SetStart(start);
- Node* param0 = graph.NewNode(common.Parameter(0), graph.start());
- Node* param1 = graph.NewNode(common.Parameter(1), graph.start());
-
- Node* mul = graph.NewNode(machine.Int32Mul(), param0, param1);
- Node* ret = graph.NewNode(common.Return(), mul, start);
-
- USE(ret);
-}
diff --git a/deps/v8/test/cctest/compiler/test-scheduler.cc b/deps/v8/test/cctest/compiler/test-scheduler.cc
deleted file mode 100644
index 1b79ed5449..0000000000
--- a/deps/v8/test/cctest/compiler/test-scheduler.cc
+++ /dev/null
@@ -1,2124 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
-#include "src/compiler/schedule.h"
-#include "src/compiler/scheduler.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/verifier.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0, 0, 1,
- 0, 0);
-
-// TODO(titzer): pull RPO tests out to their own file.
-static void CheckRPONumbers(BasicBlockVector* order, size_t expected,
- bool loops_allowed) {
- CHECK(expected == order->size());
- for (int i = 0; i < static_cast<int>(order->size()); i++) {
- CHECK(order->at(i)->rpo_number() == i);
- if (!loops_allowed) {
- CHECK_EQ(NULL, order->at(i)->loop_end());
- CHECK_EQ(NULL, order->at(i)->loop_header());
- }
- }
-}
-
-
-static void CheckLoop(BasicBlockVector* order, BasicBlock** blocks,
- int body_size) {
- BasicBlock* header = blocks[0];
- BasicBlock* end = header->loop_end();
- CHECK_NE(NULL, end);
- CHECK_GT(end->rpo_number(), 0);
- CHECK_EQ(body_size, end->rpo_number() - header->rpo_number());
- for (int i = 0; i < body_size; i++) {
- CHECK_GE(blocks[i]->rpo_number(), header->rpo_number());
- CHECK_LT(blocks[i]->rpo_number(), end->rpo_number());
- CHECK(header->LoopContains(blocks[i]));
- CHECK(header->IsLoopHeader() || blocks[i]->loop_header() == header);
- }
- if (header->rpo_number() > 0) {
- CHECK_NE(order->at(header->rpo_number() - 1)->loop_header(), header);
- }
- if (end->rpo_number() < static_cast<int>(order->size())) {
- CHECK_NE(order->at(end->rpo_number())->loop_header(), header);
- }
-}
-
-
-struct TestLoop {
- int count;
- BasicBlock** nodes;
- BasicBlock* header() { return nodes[0]; }
- BasicBlock* last() { return nodes[count - 1]; }
- ~TestLoop() { delete[] nodes; }
-
- void Check(BasicBlockVector* order) { CheckLoop(order, nodes, count); }
-};
-
-
-static TestLoop* CreateLoop(Schedule* schedule, int count) {
- TestLoop* loop = new TestLoop();
- loop->count = count;
- loop->nodes = new BasicBlock* [count];
- for (int i = 0; i < count; i++) {
- loop->nodes[i] = schedule->NewBasicBlock();
- if (i > 0) {
- schedule->AddSuccessorForTesting(loop->nodes[i - 1], loop->nodes[i]);
- }
- }
- schedule->AddSuccessorForTesting(loop->nodes[count - 1], loop->nodes[0]);
- return loop;
-}
-
-
-static int GetScheduledNodeCount(Schedule* schedule) {
- int node_count = 0;
- for (BasicBlockVectorIter i = schedule->rpo_order()->begin();
- i != schedule->rpo_order()->end(); ++i) {
- BasicBlock* block = *i;
- for (BasicBlock::const_iterator j = block->begin(); j != block->end();
- ++j) {
- ++node_count;
- }
- BasicBlock::Control control = block->control();
- if (control != BasicBlock::kNone) {
- ++node_count;
- }
- }
- return node_count;
-}
-
-
-static Schedule* ComputeAndVerifySchedule(int expected, Graph* graph) {
- if (FLAG_trace_turbo) {
- OFStream os(stdout);
- os << AsDOT(*graph);
- }
-
- Schedule* schedule = Scheduler::ComputeSchedule(graph->zone(), graph);
-
- if (FLAG_trace_turbo_scheduler) {
- OFStream os(stdout);
- os << *schedule << std::endl;
- }
- ScheduleVerifier::Run(schedule);
- CHECK_EQ(expected, GetScheduledNodeCount(schedule));
- return schedule;
-}
-
-
-TEST(RPODegenerate1) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 1, false);
- CHECK_EQ(schedule.start(), order->at(0));
-}
-
-
-TEST(RPODegenerate2) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- schedule.AddGoto(schedule.start(), schedule.end());
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 2, false);
- CHECK_EQ(schedule.start(), order->at(0));
- CHECK_EQ(schedule.end(), order->at(1));
-}
-
-
-TEST(RPOLine) {
- HandleAndZoneScope scope;
-
- for (int i = 0; i < 10; i++) {
- Schedule schedule(scope.main_zone());
-
- BasicBlock* last = schedule.start();
- for (int j = 0; j < i; j++) {
- BasicBlock* block = schedule.NewBasicBlock();
- block->set_deferred(i & 1);
- schedule.AddGoto(last, block);
- last = block;
- }
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 1 + i, false);
-
- for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
- BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(i));
- if (block->rpo_number() >= 0 && block->SuccessorCount() == 1) {
- CHECK(block->rpo_number() + 1 == block->SuccessorAt(0)->rpo_number());
- }
- }
- }
-}
-
-
-TEST(RPOSelfLoop) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- schedule.AddSuccessorForTesting(schedule.start(), schedule.start());
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 1, true);
- BasicBlock* loop[] = {schedule.start()};
- CheckLoop(order, loop, 1);
-}
-
-
-TEST(RPOEntryLoop) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- BasicBlock* body = schedule.NewBasicBlock();
- schedule.AddSuccessorForTesting(schedule.start(), body);
- schedule.AddSuccessorForTesting(body, schedule.start());
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 2, true);
- BasicBlock* loop[] = {schedule.start(), body};
- CheckLoop(order, loop, 2);
-}
-
-
-TEST(RPOEndLoop) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 3, true);
- loop1->Check(order);
-}
-
-
-TEST(RPOEndLoopNested) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 3, true);
- loop1->Check(order);
-}
-
-
-TEST(RPODiamond) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(A, C);
- schedule.AddSuccessorForTesting(B, D);
- schedule.AddSuccessorForTesting(C, D);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 4, false);
-
- CHECK_EQ(0, A->rpo_number());
- CHECK((B->rpo_number() == 1 && C->rpo_number() == 2) ||
- (B->rpo_number() == 2 && C->rpo_number() == 1));
- CHECK_EQ(3, D->rpo_number());
-}
-
-
-TEST(RPOLoop1) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, B);
- schedule.AddSuccessorForTesting(C, D);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 4, true);
- BasicBlock* loop[] = {B, C};
- CheckLoop(order, loop, 2);
-}
-
-
-TEST(RPOLoop2) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, B);
- schedule.AddSuccessorForTesting(B, D);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 4, true);
- BasicBlock* loop[] = {B, C};
- CheckLoop(order, loop, 2);
-}
-
-
-TEST(RPOLoopN) {
- HandleAndZoneScope scope;
-
- for (int i = 0; i < 11; i++) {
- Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
- BasicBlock* F = schedule.NewBasicBlock();
- BasicBlock* G = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, D);
- schedule.AddSuccessorForTesting(D, E);
- schedule.AddSuccessorForTesting(E, F);
- schedule.AddSuccessorForTesting(F, B);
- schedule.AddSuccessorForTesting(B, G);
-
- // Throw in extra backedges from time to time.
- if (i == 1) schedule.AddSuccessorForTesting(B, B);
- if (i == 2) schedule.AddSuccessorForTesting(C, B);
- if (i == 3) schedule.AddSuccessorForTesting(D, B);
- if (i == 4) schedule.AddSuccessorForTesting(E, B);
- if (i == 5) schedule.AddSuccessorForTesting(F, B);
-
- // Throw in extra loop exits from time to time.
- if (i == 6) schedule.AddSuccessorForTesting(B, G);
- if (i == 7) schedule.AddSuccessorForTesting(C, G);
- if (i == 8) schedule.AddSuccessorForTesting(D, G);
- if (i == 9) schedule.AddSuccessorForTesting(E, G);
- if (i == 10) schedule.AddSuccessorForTesting(F, G);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 7, true);
- BasicBlock* loop[] = {B, C, D, E, F};
- CheckLoop(order, loop, 5);
- }
-}
-
-
-TEST(RPOLoopNest1) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
- BasicBlock* F = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, D);
- schedule.AddSuccessorForTesting(D, C);
- schedule.AddSuccessorForTesting(D, E);
- schedule.AddSuccessorForTesting(E, B);
- schedule.AddSuccessorForTesting(E, F);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 6, true);
- BasicBlock* loop1[] = {B, C, D, E};
- CheckLoop(order, loop1, 4);
-
- BasicBlock* loop2[] = {C, D};
- CheckLoop(order, loop2, 2);
-}
-
-
-TEST(RPOLoopNest2) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
- BasicBlock* F = schedule.NewBasicBlock();
- BasicBlock* G = schedule.NewBasicBlock();
- BasicBlock* H = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, D);
- schedule.AddSuccessorForTesting(D, E);
- schedule.AddSuccessorForTesting(E, F);
- schedule.AddSuccessorForTesting(F, G);
- schedule.AddSuccessorForTesting(G, H);
-
- schedule.AddSuccessorForTesting(E, D);
- schedule.AddSuccessorForTesting(F, C);
- schedule.AddSuccessorForTesting(G, B);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 8, true);
- BasicBlock* loop1[] = {B, C, D, E, F, G};
- CheckLoop(order, loop1, 6);
-
- BasicBlock* loop2[] = {C, D, E, F};
- CheckLoop(order, loop2, 4);
-
- BasicBlock* loop3[] = {D, E};
- CheckLoop(order, loop3, 2);
-}
-
-
-TEST(RPOLoopFollow1) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
-
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
- schedule.AddSuccessorForTesting(loop2->last(), E);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
-
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
-
- loop1->Check(order);
- loop2->Check(order);
-}
-
-
-TEST(RPOLoopFollow2) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
-
- BasicBlock* A = schedule.start();
- BasicBlock* S = schedule.NewBasicBlock();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->header(), S);
- schedule.AddSuccessorForTesting(S, loop2->header());
- schedule.AddSuccessorForTesting(loop2->last(), E);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
-
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
- loop1->Check(order);
- loop2->Check(order);
-}
-
-
-TEST(RPOLoopFollowN) {
- HandleAndZoneScope scope;
-
- for (int size = 1; size < 5; size++) {
- for (int exit = 0; exit < size; exit++) {
- Schedule schedule(scope.main_zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->nodes[exit], loop2->header());
- schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
-
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
- loop1->Check(order);
- loop2->Check(order);
- }
- }
-}
-
-
-TEST(RPONestedLoopFollow1) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, loop1->header());
- schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
- schedule.AddSuccessorForTesting(loop2->last(), C);
- schedule.AddSuccessorForTesting(C, E);
- schedule.AddSuccessorForTesting(C, B);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
-
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
- loop1->Check(order);
- loop2->Check(order);
-
- BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
- CheckLoop(order, loop3, 4);
-}
-
-
-TEST(RPOLoopBackedges1) {
- HandleAndZoneScope scope;
-
- int size = 8;
- for (int i = 0; i < size; i++) {
- for (int j = 0; j < size; j++) {
- Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
- schedule.AddSuccessorForTesting(loop1->nodes[j], E);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
- }
- }
-}
-
-
-TEST(RPOLoopOutedges1) {
- HandleAndZoneScope scope;
-
- int size = 8;
- for (int i = 0; i < size; i++) {
- for (int j = 0; j < size; j++) {
- Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.start();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.end();
-
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
- schedule.AddSuccessorForTesting(loop1->nodes[j], D);
- schedule.AddSuccessorForTesting(D, E);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
- }
- }
-}
-
-
-TEST(RPOLoopOutedges2) {
- HandleAndZoneScope scope;
-
- int size = 8;
- for (int i = 0; i < size; i++) {
- Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- for (int j = 0; j < size; j++) {
- BasicBlock* O = schedule.NewBasicBlock();
- schedule.AddSuccessorForTesting(loop1->nodes[j], O);
- schedule.AddSuccessorForTesting(O, E);
- }
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
- }
-}
-
-
-TEST(RPOLoopOutloops1) {
- HandleAndZoneScope scope;
-
- int size = 8;
- for (int i = 0; i < size; i++) {
- Schedule schedule(scope.main_zone());
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- TestLoop** loopN = new TestLoop* [size];
- for (int j = 0; j < size; j++) {
- loopN[j] = CreateLoop(&schedule, 2);
- schedule.AddSuccessorForTesting(loop1->nodes[j], loopN[j]->header());
- schedule.AddSuccessorForTesting(loopN[j]->last(), E);
- }
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
-
- for (int j = 0; j < size; j++) {
- loopN[j]->Check(order);
- delete loopN[j];
- }
- delete[] loopN;
- }
-}
-
-
-TEST(RPOLoopMultibackedge) {
- HandleAndZoneScope scope;
- Schedule schedule(scope.main_zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(B, D);
- schedule.AddSuccessorForTesting(B, E);
- schedule.AddSuccessorForTesting(C, B);
- schedule.AddSuccessorForTesting(D, B);
- schedule.AddSuccessorForTesting(E, B);
-
- BasicBlockVector* order =
- Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
- CheckRPONumbers(order, 5, true);
-
- BasicBlock* loop1[] = {B, C, D, E};
- CheckLoop(order, loop1, 4);
-}
-
-
-TEST(BuildScheduleEmpty) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder builder(scope.main_zone());
- graph.SetStart(graph.NewNode(builder.Start(0)));
- graph.SetEnd(graph.NewNode(builder.End(), graph.start()));
-
- USE(Scheduler::ComputeSchedule(scope.main_zone(), &graph));
-}
-
-
-TEST(BuildScheduleOneParameter) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder builder(scope.main_zone());
- graph.SetStart(graph.NewNode(builder.Start(0)));
-
- Node* p1 = graph.NewNode(builder.Parameter(0), graph.start());
- Node* ret = graph.NewNode(builder.Return(), p1, graph.start(), graph.start());
-
- graph.SetEnd(graph.NewNode(builder.End(), ret));
-
- USE(Scheduler::ComputeSchedule(scope.main_zone(), &graph));
-}
-
-
-TEST(BuildScheduleIfSplit) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder builder(scope.main_zone());
- JSOperatorBuilder js_builder(scope.main_zone());
- graph.SetStart(graph.NewNode(builder.Start(3)));
-
- Node* p1 = graph.NewNode(builder.Parameter(0), graph.start());
- Node* p2 = graph.NewNode(builder.Parameter(1), graph.start());
- Node* p3 = graph.NewNode(builder.Parameter(2), graph.start());
- Node* p4 = graph.NewNode(builder.Parameter(3), graph.start());
- Node* p5 = graph.NewNode(builder.Parameter(4), graph.start());
- Node* cmp = graph.NewNode(js_builder.LessThanOrEqual(), p1, p2, p3,
- graph.start(), graph.start());
- Node* branch = graph.NewNode(builder.Branch(), cmp, graph.start());
- Node* true_branch = graph.NewNode(builder.IfTrue(), branch);
- Node* false_branch = graph.NewNode(builder.IfFalse(), branch);
-
- Node* ret1 = graph.NewNode(builder.Return(), p4, graph.start(), true_branch);
- Node* ret2 = graph.NewNode(builder.Return(), p5, graph.start(), false_branch);
- Node* merge = graph.NewNode(builder.Merge(2), ret1, ret2);
- graph.SetEnd(graph.NewNode(builder.End(), merge));
-
- ComputeAndVerifySchedule(13, &graph);
-}
-
-
-TEST(BuildScheduleIfSplitWithEffects) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common_builder(scope.main_zone());
- JSOperatorBuilder js_builder(scope.main_zone());
- const Operator* op;
-
- Handle<HeapObject> object =
- Handle<HeapObject>(isolate->heap()->undefined_value(), isolate);
- Unique<HeapObject> unique_constant =
- Unique<HeapObject>::CreateUninitialized(object);
-
- // Manually transcripted code for:
- // function turbo_fan_test(a, b, c, y) {
- // if (a < b) {
- // return a + b - c * c - a + y;
- // } else {
- // return c * c - a;
- // }
- // }
- op = common_builder.Start(0);
- Node* n0 = graph.NewNode(op);
- USE(n0);
- Node* nil = graph.NewNode(common_builder.Dead());
- op = common_builder.End();
- Node* n23 = graph.NewNode(op, nil);
- USE(n23);
- op = common_builder.Merge(2);
- Node* n22 = graph.NewNode(op, nil, nil);
- USE(n22);
- op = common_builder.Return();
- Node* n16 = graph.NewNode(op, nil, nil, nil);
- USE(n16);
- op = js_builder.Add();
- Node* n15 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n15);
- op = js_builder.Subtract();
- Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n14);
- op = js_builder.Subtract();
- Node* n13 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n13);
- op = js_builder.Add();
- Node* n11 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n11);
- op = common_builder.Parameter(0);
- Node* n2 = graph.NewNode(op, n0);
- USE(n2);
- n11->ReplaceInput(0, n2);
- op = common_builder.Parameter(0);
- Node* n3 = graph.NewNode(op, n0);
- USE(n3);
- n11->ReplaceInput(1, n3);
- op = common_builder.HeapConstant(unique_constant);
- Node* n7 = graph.NewNode(op);
- USE(n7);
- n11->ReplaceInput(2, n7);
- op = js_builder.LessThan();
- Node* n8 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n8);
- n8->ReplaceInput(0, n2);
- n8->ReplaceInput(1, n3);
- n8->ReplaceInput(2, n7);
- n8->ReplaceInput(3, n0);
- n8->ReplaceInput(4, n0);
- n11->ReplaceInput(3, n8);
- op = common_builder.IfTrue();
- Node* n10 = graph.NewNode(op, nil);
- USE(n10);
- op = common_builder.Branch();
- Node* n9 = graph.NewNode(op, nil, nil);
- USE(n9);
- n9->ReplaceInput(0, n8);
- n9->ReplaceInput(1, n0);
- n10->ReplaceInput(0, n9);
- n11->ReplaceInput(4, n10);
- n13->ReplaceInput(0, n11);
- op = js_builder.Multiply();
- Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n12);
- op = common_builder.Parameter(0);
- Node* n4 = graph.NewNode(op, n0);
- USE(n4);
- n12->ReplaceInput(0, n4);
- n12->ReplaceInput(1, n4);
- n12->ReplaceInput(2, n7);
- n12->ReplaceInput(3, n11);
- n12->ReplaceInput(4, n10);
- n13->ReplaceInput(1, n12);
- n13->ReplaceInput(2, n7);
- n13->ReplaceInput(3, n12);
- n13->ReplaceInput(4, n10);
- n14->ReplaceInput(0, n13);
- n14->ReplaceInput(1, n2);
- n14->ReplaceInput(2, n7);
- n14->ReplaceInput(3, n13);
- n14->ReplaceInput(4, n10);
- n15->ReplaceInput(0, n14);
- op = common_builder.Parameter(0);
- Node* n5 = graph.NewNode(op, n0);
- USE(n5);
- n15->ReplaceInput(1, n5);
- n15->ReplaceInput(2, n7);
- n15->ReplaceInput(3, n14);
- n15->ReplaceInput(4, n10);
- n16->ReplaceInput(0, n15);
- n16->ReplaceInput(1, n15);
- n16->ReplaceInput(2, n10);
- n22->ReplaceInput(0, n16);
- op = common_builder.Return();
- Node* n21 = graph.NewNode(op, nil, nil, nil);
- USE(n21);
- op = js_builder.Subtract();
- Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n20);
- op = js_builder.Multiply();
- Node* n19 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n19);
- n19->ReplaceInput(0, n4);
- n19->ReplaceInput(1, n4);
- n19->ReplaceInput(2, n7);
- n19->ReplaceInput(3, n8);
- op = common_builder.IfFalse();
- Node* n18 = graph.NewNode(op, nil);
- USE(n18);
- n18->ReplaceInput(0, n9);
- n19->ReplaceInput(4, n18);
- n20->ReplaceInput(0, n19);
- n20->ReplaceInput(1, n2);
- n20->ReplaceInput(2, n7);
- n20->ReplaceInput(3, n19);
- n20->ReplaceInput(4, n18);
- n21->ReplaceInput(0, n20);
- n21->ReplaceInput(1, n20);
- n21->ReplaceInput(2, n18);
- n22->ReplaceInput(1, n21);
- n23->ReplaceInput(0, n22);
-
- graph.SetStart(n0);
- graph.SetEnd(n23);
-
- ComputeAndVerifySchedule(20, &graph);
-}
-
-
-TEST(BuildScheduleSimpleLoop) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common_builder(scope.main_zone());
- JSOperatorBuilder js_builder(scope.main_zone());
- const Operator* op;
-
- Handle<HeapObject> object =
- Handle<HeapObject>(isolate->heap()->undefined_value(), isolate);
- Unique<HeapObject> unique_constant =
- Unique<HeapObject>::CreateUninitialized(object);
-
- // Manually transcripted code for:
- // function turbo_fan_test(a, b) {
- // while (a < b) {
- // a++;
- // }
- // return a;
- // }
- op = common_builder.Start(0);
- Node* n0 = graph.NewNode(op);
- USE(n0);
- Node* nil = graph.NewNode(common_builder.Dead());
- op = common_builder.End();
- Node* n20 = graph.NewNode(op, nil);
- USE(n20);
- op = common_builder.Return();
- Node* n19 = graph.NewNode(op, nil, nil, nil);
- USE(n19);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n8 = graph.NewNode(op, nil, nil, nil);
- USE(n8);
- op = common_builder.Parameter(0);
- Node* n2 = graph.NewNode(op, n0);
- USE(n2);
- n8->ReplaceInput(0, n2);
- op = js_builder.Add();
- Node* n18 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n18);
- op = js_builder.ToNumber();
- Node* n16 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n16);
- n16->ReplaceInput(0, n8);
- op = common_builder.HeapConstant(unique_constant);
- Node* n5 = graph.NewNode(op);
- USE(n5);
- n16->ReplaceInput(1, n5);
- op = js_builder.LessThan();
- Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n12);
- n12->ReplaceInput(0, n8);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n9 = graph.NewNode(op, nil, nil, nil);
- USE(n9);
- op = common_builder.Parameter(0);
- Node* n3 = graph.NewNode(op, n0);
- USE(n3);
- n9->ReplaceInput(0, n3);
- n9->ReplaceInput(1, n9);
- op = common_builder.Loop(2);
- Node* n6 = graph.NewNode(op, nil, nil);
- USE(n6);
- n6->ReplaceInput(0, n0);
- op = common_builder.IfTrue();
- Node* n14 = graph.NewNode(op, nil);
- USE(n14);
- op = common_builder.Branch();
- Node* n13 = graph.NewNode(op, nil, nil);
- USE(n13);
- n13->ReplaceInput(0, n12);
- n13->ReplaceInput(1, n6);
- n14->ReplaceInput(0, n13);
- n6->ReplaceInput(1, n14);
- n9->ReplaceInput(2, n6);
- n12->ReplaceInput(1, n9);
- n12->ReplaceInput(2, n5);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n10 = graph.NewNode(op, nil, nil, nil);
- USE(n10);
- n10->ReplaceInput(0, n0);
- n10->ReplaceInput(1, n18);
- n10->ReplaceInput(2, n6);
- n12->ReplaceInput(3, n10);
- n12->ReplaceInput(4, n6);
- n16->ReplaceInput(2, n12);
- n16->ReplaceInput(3, n14);
- n18->ReplaceInput(0, n16);
- op = common_builder.NumberConstant(0);
- Node* n17 = graph.NewNode(op);
- USE(n17);
- n18->ReplaceInput(1, n17);
- n18->ReplaceInput(2, n5);
- n18->ReplaceInput(3, n16);
- n18->ReplaceInput(4, n14);
- n8->ReplaceInput(1, n18);
- n8->ReplaceInput(2, n6);
- n19->ReplaceInput(0, n8);
- n19->ReplaceInput(1, n12);
- op = common_builder.IfFalse();
- Node* n15 = graph.NewNode(op, nil);
- USE(n15);
- n15->ReplaceInput(0, n13);
- n19->ReplaceInput(2, n15);
- n20->ReplaceInput(0, n19);
-
- graph.SetStart(n0);
- graph.SetEnd(n20);
-
- ComputeAndVerifySchedule(19, &graph);
-}
-
-
-TEST(BuildScheduleComplexLoops) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common_builder(scope.main_zone());
- JSOperatorBuilder js_builder(scope.main_zone());
- const Operator* op;
-
- Handle<HeapObject> object =
- Handle<HeapObject>(isolate->heap()->undefined_value(), isolate);
- Unique<HeapObject> unique_constant =
- Unique<HeapObject>::CreateUninitialized(object);
-
- // Manually transcripted code for:
- // function turbo_fan_test(a, b, c) {
- // while (a < b) {
- // a++;
- // while (c < b) {
- // c++;
- // }
- // }
- // while (a < b) {
- // a += 2;
- // }
- // return a;
- // }
- op = common_builder.Start(0);
- Node* n0 = graph.NewNode(op);
- USE(n0);
- Node* nil = graph.NewNode(common_builder.Dead());
- op = common_builder.End();
- Node* n46 = graph.NewNode(op, nil);
- USE(n46);
- op = common_builder.Return();
- Node* n45 = graph.NewNode(op, nil, nil, nil);
- USE(n45);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n35 = graph.NewNode(op, nil, nil, nil);
- USE(n35);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n9 = graph.NewNode(op, nil, nil, nil);
- USE(n9);
- op = common_builder.Parameter(0);
- Node* n2 = graph.NewNode(op, n0);
- USE(n2);
- n9->ReplaceInput(0, n2);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n23 = graph.NewNode(op, nil, nil, nil);
- USE(n23);
- op = js_builder.Add();
- Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n20);
- op = js_builder.ToNumber();
- Node* n18 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n18);
- n18->ReplaceInput(0, n9);
- op = common_builder.HeapConstant(unique_constant);
- Node* n6 = graph.NewNode(op);
- USE(n6);
- n18->ReplaceInput(1, n6);
- op = js_builder.LessThan();
- Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n14);
- n14->ReplaceInput(0, n9);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n10 = graph.NewNode(op, nil, nil, nil);
- USE(n10);
- op = common_builder.Parameter(0);
- Node* n3 = graph.NewNode(op, n0);
- USE(n3);
- n10->ReplaceInput(0, n3);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n24 = graph.NewNode(op, nil, nil, nil);
- USE(n24);
- n24->ReplaceInput(0, n10);
- n24->ReplaceInput(1, n24);
- op = common_builder.Loop(2);
- Node* n21 = graph.NewNode(op, nil, nil);
- USE(n21);
- op = common_builder.IfTrue();
- Node* n16 = graph.NewNode(op, nil);
- USE(n16);
- op = common_builder.Branch();
- Node* n15 = graph.NewNode(op, nil, nil);
- USE(n15);
- n15->ReplaceInput(0, n14);
- op = common_builder.Loop(2);
- Node* n7 = graph.NewNode(op, nil, nil);
- USE(n7);
- n7->ReplaceInput(0, n0);
- op = common_builder.IfFalse();
- Node* n30 = graph.NewNode(op, nil);
- USE(n30);
- op = common_builder.Branch();
- Node* n28 = graph.NewNode(op, nil, nil);
- USE(n28);
- op = js_builder.LessThan();
- Node* n27 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n27);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n25 = graph.NewNode(op, nil, nil, nil);
- USE(n25);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n11 = graph.NewNode(op, nil, nil, nil);
- USE(n11);
- op = common_builder.Parameter(0);
- Node* n4 = graph.NewNode(op, n0);
- USE(n4);
- n11->ReplaceInput(0, n4);
- n11->ReplaceInput(1, n25);
- n11->ReplaceInput(2, n7);
- n25->ReplaceInput(0, n11);
- op = js_builder.Add();
- Node* n32 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n32);
- op = js_builder.ToNumber();
- Node* n31 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n31);
- n31->ReplaceInput(0, n25);
- n31->ReplaceInput(1, n6);
- n31->ReplaceInput(2, n27);
- op = common_builder.IfTrue();
- Node* n29 = graph.NewNode(op, nil);
- USE(n29);
- n29->ReplaceInput(0, n28);
- n31->ReplaceInput(3, n29);
- n32->ReplaceInput(0, n31);
- op = common_builder.NumberConstant(0);
- Node* n19 = graph.NewNode(op);
- USE(n19);
- n32->ReplaceInput(1, n19);
- n32->ReplaceInput(2, n6);
- n32->ReplaceInput(3, n31);
- n32->ReplaceInput(4, n29);
- n25->ReplaceInput(1, n32);
- n25->ReplaceInput(2, n21);
- n27->ReplaceInput(0, n25);
- n27->ReplaceInput(1, n24);
- n27->ReplaceInput(2, n6);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n26 = graph.NewNode(op, nil, nil, nil);
- USE(n26);
- n26->ReplaceInput(0, n20);
- n26->ReplaceInput(1, n32);
- n26->ReplaceInput(2, n21);
- n27->ReplaceInput(3, n26);
- n27->ReplaceInput(4, n21);
- n28->ReplaceInput(0, n27);
- n28->ReplaceInput(1, n21);
- n30->ReplaceInput(0, n28);
- n7->ReplaceInput(1, n30);
- n15->ReplaceInput(1, n7);
- n16->ReplaceInput(0, n15);
- n21->ReplaceInput(0, n16);
- n21->ReplaceInput(1, n29);
- n24->ReplaceInput(2, n21);
- n10->ReplaceInput(1, n24);
- n10->ReplaceInput(2, n7);
- n14->ReplaceInput(1, n10);
- n14->ReplaceInput(2, n6);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n12 = graph.NewNode(op, nil, nil, nil);
- USE(n12);
- n12->ReplaceInput(0, n0);
- n12->ReplaceInput(1, n27);
- n12->ReplaceInput(2, n7);
- n14->ReplaceInput(3, n12);
- n14->ReplaceInput(4, n7);
- n18->ReplaceInput(2, n14);
- n18->ReplaceInput(3, n16);
- n20->ReplaceInput(0, n18);
- n20->ReplaceInput(1, n19);
- n20->ReplaceInput(2, n6);
- n20->ReplaceInput(3, n18);
- n20->ReplaceInput(4, n16);
- n23->ReplaceInput(0, n20);
- n23->ReplaceInput(1, n23);
- n23->ReplaceInput(2, n21);
- n9->ReplaceInput(1, n23);
- n9->ReplaceInput(2, n7);
- n35->ReplaceInput(0, n9);
- op = js_builder.Add();
- Node* n44 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n44);
- n44->ReplaceInput(0, n35);
- op = common_builder.NumberConstant(0);
- Node* n43 = graph.NewNode(op);
- USE(n43);
- n44->ReplaceInput(1, n43);
- n44->ReplaceInput(2, n6);
- op = js_builder.LessThan();
- Node* n39 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n39);
- n39->ReplaceInput(0, n35);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n36 = graph.NewNode(op, nil, nil, nil);
- USE(n36);
- n36->ReplaceInput(0, n10);
- n36->ReplaceInput(1, n36);
- op = common_builder.Loop(2);
- Node* n33 = graph.NewNode(op, nil, nil);
- USE(n33);
- op = common_builder.IfFalse();
- Node* n17 = graph.NewNode(op, nil);
- USE(n17);
- n17->ReplaceInput(0, n15);
- n33->ReplaceInput(0, n17);
- op = common_builder.IfTrue();
- Node* n41 = graph.NewNode(op, nil);
- USE(n41);
- op = common_builder.Branch();
- Node* n40 = graph.NewNode(op, nil, nil);
- USE(n40);
- n40->ReplaceInput(0, n39);
- n40->ReplaceInput(1, n33);
- n41->ReplaceInput(0, n40);
- n33->ReplaceInput(1, n41);
- n36->ReplaceInput(2, n33);
- n39->ReplaceInput(1, n36);
- n39->ReplaceInput(2, n6);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n38 = graph.NewNode(op, nil, nil, nil);
- USE(n38);
- n38->ReplaceInput(0, n14);
- n38->ReplaceInput(1, n44);
- n38->ReplaceInput(2, n33);
- n39->ReplaceInput(3, n38);
- n39->ReplaceInput(4, n33);
- n44->ReplaceInput(3, n39);
- n44->ReplaceInput(4, n41);
- n35->ReplaceInput(1, n44);
- n35->ReplaceInput(2, n33);
- n45->ReplaceInput(0, n35);
- n45->ReplaceInput(1, n39);
- op = common_builder.IfFalse();
- Node* n42 = graph.NewNode(op, nil);
- USE(n42);
- n42->ReplaceInput(0, n40);
- n45->ReplaceInput(2, n42);
- n46->ReplaceInput(0, n45);
-
- graph.SetStart(n0);
- graph.SetEnd(n46);
-
- ComputeAndVerifySchedule(46, &graph);
-}
-
-
-TEST(BuildScheduleBreakAndContinue) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common_builder(scope.main_zone());
- JSOperatorBuilder js_builder(scope.main_zone());
- const Operator* op;
-
- Handle<HeapObject> object =
- Handle<HeapObject>(isolate->heap()->undefined_value(), isolate);
- Unique<HeapObject> unique_constant =
- Unique<HeapObject>::CreateUninitialized(object);
-
- // Manually transcripted code for:
- // function turbo_fan_test(a, b, c) {
- // var d = 0;
- // while (a < b) {
- // a++;
- // while (c < b) {
- // c++;
- // if (d == 0) break;
- // a++;
- // }
- // if (a == 1) continue;
- // d++;
- // }
- // return a + d;
- // }
- op = common_builder.Start(0);
- Node* n0 = graph.NewNode(op);
- USE(n0);
- Node* nil = graph.NewNode(common_builder.Dead());
- op = common_builder.End();
- Node* n58 = graph.NewNode(op, nil);
- USE(n58);
- op = common_builder.Return();
- Node* n57 = graph.NewNode(op, nil, nil, nil);
- USE(n57);
- op = js_builder.Add();
- Node* n56 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n56);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n10 = graph.NewNode(op, nil, nil, nil);
- USE(n10);
- op = common_builder.Parameter(0);
- Node* n2 = graph.NewNode(op, n0);
- USE(n2);
- n10->ReplaceInput(0, n2);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n25 = graph.NewNode(op, nil, nil, nil);
- USE(n25);
- op = js_builder.Add();
- Node* n22 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n22);
- op = js_builder.ToNumber();
- Node* n20 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n20);
- n20->ReplaceInput(0, n10);
- op = common_builder.HeapConstant(unique_constant);
- Node* n6 = graph.NewNode(op);
- USE(n6);
- n20->ReplaceInput(1, n6);
- op = js_builder.LessThan();
- Node* n16 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n16);
- n16->ReplaceInput(0, n10);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n11 = graph.NewNode(op, nil, nil, nil);
- USE(n11);
- op = common_builder.Parameter(0);
- Node* n3 = graph.NewNode(op, n0);
- USE(n3);
- n11->ReplaceInput(0, n3);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n26 = graph.NewNode(op, nil, nil, nil);
- USE(n26);
- n26->ReplaceInput(0, n11);
- n26->ReplaceInput(1, n26);
- op = common_builder.Loop(2);
- Node* n23 = graph.NewNode(op, nil, nil);
- USE(n23);
- op = common_builder.IfTrue();
- Node* n18 = graph.NewNode(op, nil);
- USE(n18);
- op = common_builder.Branch();
- Node* n17 = graph.NewNode(op, nil, nil);
- USE(n17);
- n17->ReplaceInput(0, n16);
- op = common_builder.Loop(2);
- Node* n8 = graph.NewNode(op, nil, nil);
- USE(n8);
- n8->ReplaceInput(0, n0);
- op = common_builder.Merge(2);
- Node* n53 = graph.NewNode(op, nil, nil);
- USE(n53);
- op = common_builder.IfTrue();
- Node* n49 = graph.NewNode(op, nil);
- USE(n49);
- op = common_builder.Branch();
- Node* n48 = graph.NewNode(op, nil, nil);
- USE(n48);
- op = js_builder.Equal();
- Node* n47 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n47);
- n47->ReplaceInput(0, n25);
- op = common_builder.NumberConstant(0);
- Node* n46 = graph.NewNode(op);
- USE(n46);
- n47->ReplaceInput(1, n46);
- n47->ReplaceInput(2, n6);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n42 = graph.NewNode(op, nil, nil, nil);
- USE(n42);
- op = js_builder.LessThan();
- Node* n30 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n30);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n27 = graph.NewNode(op, nil, nil, nil);
- USE(n27);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n12 = graph.NewNode(op, nil, nil, nil);
- USE(n12);
- op = common_builder.Parameter(0);
- Node* n4 = graph.NewNode(op, n0);
- USE(n4);
- n12->ReplaceInput(0, n4);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n41 = graph.NewNode(op, nil, nil, nil);
- USE(n41);
- n41->ReplaceInput(0, n27);
- op = js_builder.Add();
- Node* n35 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n35);
- op = js_builder.ToNumber();
- Node* n34 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n34);
- n34->ReplaceInput(0, n27);
- n34->ReplaceInput(1, n6);
- n34->ReplaceInput(2, n30);
- op = common_builder.IfTrue();
- Node* n32 = graph.NewNode(op, nil);
- USE(n32);
- op = common_builder.Branch();
- Node* n31 = graph.NewNode(op, nil, nil);
- USE(n31);
- n31->ReplaceInput(0, n30);
- n31->ReplaceInput(1, n23);
- n32->ReplaceInput(0, n31);
- n34->ReplaceInput(3, n32);
- n35->ReplaceInput(0, n34);
- op = common_builder.NumberConstant(0);
- Node* n21 = graph.NewNode(op);
- USE(n21);
- n35->ReplaceInput(1, n21);
- n35->ReplaceInput(2, n6);
- n35->ReplaceInput(3, n34);
- n35->ReplaceInput(4, n32);
- n41->ReplaceInput(1, n35);
- op = common_builder.Merge(2);
- Node* n40 = graph.NewNode(op, nil, nil);
- USE(n40);
- op = common_builder.IfFalse();
- Node* n33 = graph.NewNode(op, nil);
- USE(n33);
- n33->ReplaceInput(0, n31);
- n40->ReplaceInput(0, n33);
- op = common_builder.IfTrue();
- Node* n39 = graph.NewNode(op, nil);
- USE(n39);
- op = common_builder.Branch();
- Node* n38 = graph.NewNode(op, nil, nil);
- USE(n38);
- op = js_builder.Equal();
- Node* n37 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n37);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n28 = graph.NewNode(op, nil, nil, nil);
- USE(n28);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n13 = graph.NewNode(op, nil, nil, nil);
- USE(n13);
- op = common_builder.NumberConstant(0);
- Node* n7 = graph.NewNode(op);
- USE(n7);
- n13->ReplaceInput(0, n7);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n54 = graph.NewNode(op, nil, nil, nil);
- USE(n54);
- n54->ReplaceInput(0, n28);
- op = js_builder.Add();
- Node* n52 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n52);
- op = js_builder.ToNumber();
- Node* n51 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n51);
- n51->ReplaceInput(0, n28);
- n51->ReplaceInput(1, n6);
- n51->ReplaceInput(2, n47);
- op = common_builder.IfFalse();
- Node* n50 = graph.NewNode(op, nil);
- USE(n50);
- n50->ReplaceInput(0, n48);
- n51->ReplaceInput(3, n50);
- n52->ReplaceInput(0, n51);
- n52->ReplaceInput(1, n21);
- n52->ReplaceInput(2, n6);
- n52->ReplaceInput(3, n51);
- n52->ReplaceInput(4, n50);
- n54->ReplaceInput(1, n52);
- n54->ReplaceInput(2, n53);
- n13->ReplaceInput(1, n54);
- n13->ReplaceInput(2, n8);
- n28->ReplaceInput(0, n13);
- n28->ReplaceInput(1, n28);
- n28->ReplaceInput(2, n23);
- n37->ReplaceInput(0, n28);
- op = common_builder.NumberConstant(0);
- Node* n36 = graph.NewNode(op);
- USE(n36);
- n37->ReplaceInput(1, n36);
- n37->ReplaceInput(2, n6);
- n37->ReplaceInput(3, n35);
- n37->ReplaceInput(4, n32);
- n38->ReplaceInput(0, n37);
- n38->ReplaceInput(1, n32);
- n39->ReplaceInput(0, n38);
- n40->ReplaceInput(1, n39);
- n41->ReplaceInput(2, n40);
- n12->ReplaceInput(1, n41);
- n12->ReplaceInput(2, n8);
- n27->ReplaceInput(0, n12);
- n27->ReplaceInput(1, n35);
- n27->ReplaceInput(2, n23);
- n30->ReplaceInput(0, n27);
- n30->ReplaceInput(1, n26);
- n30->ReplaceInput(2, n6);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n29 = graph.NewNode(op, nil, nil, nil);
- USE(n29);
- n29->ReplaceInput(0, n22);
- op = js_builder.Add();
- Node* n45 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n45);
- op = js_builder.ToNumber();
- Node* n44 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n44);
- n44->ReplaceInput(0, n25);
- n44->ReplaceInput(1, n6);
- n44->ReplaceInput(2, n37);
- op = common_builder.IfFalse();
- Node* n43 = graph.NewNode(op, nil);
- USE(n43);
- n43->ReplaceInput(0, n38);
- n44->ReplaceInput(3, n43);
- n45->ReplaceInput(0, n44);
- n45->ReplaceInput(1, n21);
- n45->ReplaceInput(2, n6);
- n45->ReplaceInput(3, n44);
- n45->ReplaceInput(4, n43);
- n29->ReplaceInput(1, n45);
- n29->ReplaceInput(2, n23);
- n30->ReplaceInput(3, n29);
- n30->ReplaceInput(4, n23);
- n42->ReplaceInput(0, n30);
- n42->ReplaceInput(1, n37);
- n42->ReplaceInput(2, n40);
- n47->ReplaceInput(3, n42);
- n47->ReplaceInput(4, n40);
- n48->ReplaceInput(0, n47);
- n48->ReplaceInput(1, n40);
- n49->ReplaceInput(0, n48);
- n53->ReplaceInput(0, n49);
- n53->ReplaceInput(1, n50);
- n8->ReplaceInput(1, n53);
- n17->ReplaceInput(1, n8);
- n18->ReplaceInput(0, n17);
- n23->ReplaceInput(0, n18);
- n23->ReplaceInput(1, n43);
- n26->ReplaceInput(2, n23);
- n11->ReplaceInput(1, n26);
- n11->ReplaceInput(2, n8);
- n16->ReplaceInput(1, n11);
- n16->ReplaceInput(2, n6);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n14 = graph.NewNode(op, nil, nil, nil);
- USE(n14);
- n14->ReplaceInput(0, n0);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n55 = graph.NewNode(op, nil, nil, nil);
- USE(n55);
- n55->ReplaceInput(0, n47);
- n55->ReplaceInput(1, n52);
- n55->ReplaceInput(2, n53);
- n14->ReplaceInput(1, n55);
- n14->ReplaceInput(2, n8);
- n16->ReplaceInput(3, n14);
- n16->ReplaceInput(4, n8);
- n20->ReplaceInput(2, n16);
- n20->ReplaceInput(3, n18);
- n22->ReplaceInput(0, n20);
- n22->ReplaceInput(1, n21);
- n22->ReplaceInput(2, n6);
- n22->ReplaceInput(3, n20);
- n22->ReplaceInput(4, n18);
- n25->ReplaceInput(0, n22);
- n25->ReplaceInput(1, n45);
- n25->ReplaceInput(2, n23);
- n10->ReplaceInput(1, n25);
- n10->ReplaceInput(2, n8);
- n56->ReplaceInput(0, n10);
- n56->ReplaceInput(1, n13);
- n56->ReplaceInput(2, n6);
- n56->ReplaceInput(3, n16);
- op = common_builder.IfFalse();
- Node* n19 = graph.NewNode(op, nil);
- USE(n19);
- n19->ReplaceInput(0, n17);
- n56->ReplaceInput(4, n19);
- n57->ReplaceInput(0, n56);
- n57->ReplaceInput(1, n56);
- n57->ReplaceInput(2, n19);
- n58->ReplaceInput(0, n57);
-
- graph.SetStart(n0);
- graph.SetEnd(n58);
-
- ComputeAndVerifySchedule(62, &graph);
-}
-
-
-TEST(BuildScheduleSimpleLoopWithCodeMotion) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common_builder(scope.main_zone());
- JSOperatorBuilder js_builder(scope.main_zone());
- const Operator* op;
-
- Handle<HeapObject> object =
- Handle<HeapObject>(isolate->heap()->undefined_value(), isolate);
- Unique<HeapObject> unique_constant =
- Unique<HeapObject>::CreateUninitialized(object);
-
- // Manually transcripted code for:
- // function turbo_fan_test(a, b, c) {
- // while (a < b) {
- // a += b + c;
- // }
- // return a;
- // }
- op = common_builder.Start(0);
- Node* n0 = graph.NewNode(op);
- USE(n0);
- Node* nil = graph.NewNode(common_builder.Dead());
- op = common_builder.End();
- Node* n22 = graph.NewNode(op, nil);
- USE(n22);
- op = common_builder.Return();
- Node* n21 = graph.NewNode(op, nil, nil, nil);
- USE(n21);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n9 = graph.NewNode(op, nil, nil, nil);
- USE(n9);
- op = common_builder.Parameter(0);
- Node* n2 = graph.NewNode(op, n0);
- USE(n2);
- n9->ReplaceInput(0, n2);
- op = js_builder.Add();
- Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n20);
- n20->ReplaceInput(0, n9);
- op = &kIntAdd;
- Node* n19 = graph.NewNode(op, nil, nil);
- USE(n19);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n10 = graph.NewNode(op, nil, nil, nil);
- USE(n10);
- op = common_builder.Parameter(0);
- Node* n3 = graph.NewNode(op, n0);
- USE(n3);
- n10->ReplaceInput(0, n3);
- n10->ReplaceInput(1, n10);
- op = common_builder.Loop(2);
- Node* n7 = graph.NewNode(op, nil, nil);
- USE(n7);
- n7->ReplaceInput(0, n0);
- op = common_builder.IfTrue();
- Node* n17 = graph.NewNode(op, nil);
- USE(n17);
- op = common_builder.Branch();
- Node* n16 = graph.NewNode(op, nil, nil);
- USE(n16);
- op = js_builder.ToBoolean();
- Node* n15 = graph.NewNode(op, nil, nil, nil, nil);
- USE(n15);
- op = js_builder.LessThan();
- Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
- USE(n14);
- n14->ReplaceInput(0, n9);
- n14->ReplaceInput(1, n10);
- op = common_builder.HeapConstant(unique_constant);
- Node* n6 = graph.NewNode(op);
- USE(n6);
- n14->ReplaceInput(2, n6);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n12 = graph.NewNode(op, nil, nil, nil);
- USE(n12);
- n12->ReplaceInput(0, n0);
- n12->ReplaceInput(1, n20);
- n12->ReplaceInput(2, n7);
- n14->ReplaceInput(3, n12);
- n14->ReplaceInput(4, n7);
- n15->ReplaceInput(0, n14);
- n15->ReplaceInput(1, n6);
- n15->ReplaceInput(2, n14);
- n15->ReplaceInput(3, n7);
- n16->ReplaceInput(0, n15);
- n16->ReplaceInput(1, n7);
- n17->ReplaceInput(0, n16);
- n7->ReplaceInput(1, n17);
- n10->ReplaceInput(2, n7);
- n19->ReplaceInput(0, n2);
- op = common_builder.Phi(kMachAnyTagged, 2);
- Node* n11 = graph.NewNode(op, nil, nil, nil);
- USE(n11);
- op = common_builder.Parameter(0);
- Node* n4 = graph.NewNode(op, n0);
- USE(n4);
- n11->ReplaceInput(0, n4);
- n11->ReplaceInput(1, n11);
- n11->ReplaceInput(2, n7);
- n19->ReplaceInput(1, n3);
- n20->ReplaceInput(1, n19);
- n20->ReplaceInput(2, n6);
- n20->ReplaceInput(3, n19);
- n20->ReplaceInput(4, n17);
- n9->ReplaceInput(1, n20);
- n9->ReplaceInput(2, n7);
- n21->ReplaceInput(0, n9);
- n21->ReplaceInput(1, n15);
- op = common_builder.IfFalse();
- Node* n18 = graph.NewNode(op, nil);
- USE(n18);
- n18->ReplaceInput(0, n16);
- n21->ReplaceInput(2, n18);
- n22->ReplaceInput(0, n21);
-
- graph.SetStart(n0);
- graph.SetEnd(n22);
-
- Schedule* schedule = ComputeAndVerifySchedule(19, &graph);
- // Make sure the integer-only add gets hoisted to a different block that the
- // JSAdd.
- CHECK(schedule->block(n19) != schedule->block(n20));
-}
-
-
-#if V8_TURBOFAN_TARGET
-
-static Node* CreateDiamond(Graph* graph, CommonOperatorBuilder* common,
- Node* cond) {
- Node* tv = graph->NewNode(common->Int32Constant(6));
- Node* fv = graph->NewNode(common->Int32Constant(7));
- Node* br = graph->NewNode(common->Branch(), cond, graph->start());
- Node* t = graph->NewNode(common->IfTrue(), br);
- Node* f = graph->NewNode(common->IfFalse(), br);
- Node* m = graph->NewNode(common->Merge(2), t, f);
- Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), tv, fv, m);
- return phi;
-}
-
-
-TEST(FloatingDiamond1) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(1));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
- Node* d1 = CreateDiamond(&graph, &common, p0);
- Node* ret = graph.NewNode(common.Return(), d1, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(13, &graph);
-}
-
-
-TEST(FloatingDiamond2) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
- Node* p1 = graph.NewNode(common.Parameter(1), start);
- Node* d1 = CreateDiamond(&graph, &common, p0);
- Node* d2 = CreateDiamond(&graph, &common, p1);
- Node* add = graph.NewNode(&kIntAdd, d1, d2);
- Node* ret = graph.NewNode(common.Return(), add, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(24, &graph);
-}
-
-
-TEST(FloatingDiamond3) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
- Node* p1 = graph.NewNode(common.Parameter(1), start);
- Node* d1 = CreateDiamond(&graph, &common, p0);
- Node* d2 = CreateDiamond(&graph, &common, p1);
- Node* add = graph.NewNode(&kIntAdd, d1, d2);
- Node* d3 = CreateDiamond(&graph, &common, add);
- Node* ret = graph.NewNode(common.Return(), d3, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(33, &graph);
-}
-
-
-TEST(NestedFloatingDiamonds) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
- SimplifiedOperatorBuilder simplified(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
-
- Node* fv = graph.NewNode(common.Int32Constant(7));
- Node* br = graph.NewNode(common.Branch(), p0, graph.start());
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
-
- Node* map = graph.NewNode(
- simplified.LoadElement(AccessBuilder::ForFixedArrayElement()), p0, p0, p0,
- start, f);
- Node* br1 = graph.NewNode(common.Branch(), map, graph.start());
- Node* t1 = graph.NewNode(common.IfTrue(), br1);
- Node* f1 = graph.NewNode(common.IfFalse(), br1);
- Node* m1 = graph.NewNode(common.Merge(2), t1, f1);
- Node* ttrue = graph.NewNode(common.Int32Constant(1));
- Node* ffalse = graph.NewNode(common.Int32Constant(0));
- Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), ttrue, ffalse, m1);
-
-
- Node* m = graph.NewNode(common.Merge(2), t, f);
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), fv, phi1, m);
- Node* ephi1 = graph.NewNode(common.EffectPhi(2), start, map, m);
-
- Node* ret = graph.NewNode(common.Return(), phi, ephi1, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(23, &graph);
-}
-
-
-TEST(NestedFloatingDiamondWithChain) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
- Node* p1 = graph.NewNode(common.Parameter(1), start);
- Node* c = graph.NewNode(common.Int32Constant(7));
-
- Node* brA1 = graph.NewNode(common.Branch(), p0, graph.start());
- Node* tA1 = graph.NewNode(common.IfTrue(), brA1);
- Node* fA1 = graph.NewNode(common.IfFalse(), brA1);
- Node* mA1 = graph.NewNode(common.Merge(2), tA1, fA1);
- Node* phiA1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p1, mA1);
-
- Node* brB1 = graph.NewNode(common.Branch(), p1, graph.start());
- Node* tB1 = graph.NewNode(common.IfTrue(), brB1);
- Node* fB1 = graph.NewNode(common.IfFalse(), brB1);
- Node* mB1 = graph.NewNode(common.Merge(2), tB1, fB1);
- Node* phiB1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p1, mB1);
-
- Node* brA2 = graph.NewNode(common.Branch(), phiB1, mA1);
- Node* tA2 = graph.NewNode(common.IfTrue(), brA2);
- Node* fA2 = graph.NewNode(common.IfFalse(), brA2);
- Node* mA2 = graph.NewNode(common.Merge(2), tA2, fA2);
- Node* phiA2 = graph.NewNode(common.Phi(kMachAnyTagged, 2), phiB1, c, mA2);
-
- Node* brB2 = graph.NewNode(common.Branch(), phiA1, mB1);
- Node* tB2 = graph.NewNode(common.IfTrue(), brB2);
- Node* fB2 = graph.NewNode(common.IfFalse(), brB2);
- Node* mB2 = graph.NewNode(common.Merge(2), tB2, fB2);
- Node* phiB2 = graph.NewNode(common.Phi(kMachAnyTagged, 2), phiA1, c, mB2);
-
- Node* add = graph.NewNode(&kIntAdd, phiA2, phiB2);
- Node* ret = graph.NewNode(common.Return(), add, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(35, &graph);
-}
-
-
-TEST(NestedFloatingDiamondWithLoop) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
-
- Node* fv = graph.NewNode(common.Int32Constant(7));
- Node* br = graph.NewNode(common.Branch(), p0, graph.start());
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
-
- Node* loop = graph.NewNode(common.Loop(2), f, start);
- Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
-
- Node* add = graph.NewNode(&kIntAdd, ind, fv);
- Node* br1 = graph.NewNode(common.Branch(), add, loop);
- Node* t1 = graph.NewNode(common.IfTrue(), br1);
- Node* f1 = graph.NewNode(common.IfFalse(), br1);
-
- loop->ReplaceInput(1, t1); // close loop.
- ind->ReplaceInput(1, ind); // close induction variable.
-
- Node* m = graph.NewNode(common.Merge(2), t, f1);
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), fv, ind, m);
-
- Node* ret = graph.NewNode(common.Return(), phi, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(20, &graph);
-}
-
-
-TEST(LoopedFloatingDiamond1) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
-
- Node* c = graph.NewNode(common.Int32Constant(7));
- Node* loop = graph.NewNode(common.Loop(2), start, start);
- Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
- Node* add = graph.NewNode(&kIntAdd, ind, c);
-
- Node* br = graph.NewNode(common.Branch(), add, loop);
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
-
- Node* br1 = graph.NewNode(common.Branch(), p0, graph.start());
- Node* t1 = graph.NewNode(common.IfTrue(), br1);
- Node* f1 = graph.NewNode(common.IfFalse(), br1);
- Node* m1 = graph.NewNode(common.Merge(2), t1, f1);
- Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), add, p0, m1);
-
- loop->ReplaceInput(1, t); // close loop.
- ind->ReplaceInput(1, phi1); // close induction variable.
-
- Node* ret = graph.NewNode(common.Return(), ind, start, f);
- Node* end = graph.NewNode(common.End(), ret, f);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(20, &graph);
-}
-
-
-TEST(LoopedFloatingDiamond2) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
-
- Node* c = graph.NewNode(common.Int32Constant(7));
- Node* loop = graph.NewNode(common.Loop(2), start, start);
- Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
-
- Node* br1 = graph.NewNode(common.Branch(), p0, graph.start());
- Node* t1 = graph.NewNode(common.IfTrue(), br1);
- Node* f1 = graph.NewNode(common.IfFalse(), br1);
- Node* m1 = graph.NewNode(common.Merge(2), t1, f1);
- Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), c, ind, m1);
-
- Node* add = graph.NewNode(&kIntAdd, ind, phi1);
-
- Node* br = graph.NewNode(common.Branch(), add, loop);
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
-
- loop->ReplaceInput(1, t); // close loop.
- ind->ReplaceInput(1, add); // close induction variable.
-
- Node* ret = graph.NewNode(common.Return(), ind, start, f);
- Node* end = graph.NewNode(common.End(), ret, f);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(20, &graph);
-}
-
-
-TEST(LoopedFloatingDiamond3) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
-
- Node* c = graph.NewNode(common.Int32Constant(7));
- Node* loop = graph.NewNode(common.Loop(2), start, start);
- Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
-
- Node* br1 = graph.NewNode(common.Branch(), p0, graph.start());
- Node* t1 = graph.NewNode(common.IfTrue(), br1);
- Node* f1 = graph.NewNode(common.IfFalse(), br1);
-
- Node* loop1 = graph.NewNode(common.Loop(2), t1, start);
- Node* ind1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
-
- Node* add1 = graph.NewNode(&kIntAdd, ind1, c);
- Node* br2 = graph.NewNode(common.Branch(), add1, loop1);
- Node* t2 = graph.NewNode(common.IfTrue(), br2);
- Node* f2 = graph.NewNode(common.IfFalse(), br2);
-
- loop1->ReplaceInput(1, t2); // close inner loop.
- ind1->ReplaceInput(1, ind1); // close inner induction variable.
-
- Node* m1 = graph.NewNode(common.Merge(2), f1, f2);
- Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), c, ind1, m1);
-
- Node* add = graph.NewNode(&kIntAdd, ind, phi1);
-
- Node* br = graph.NewNode(common.Branch(), add, loop);
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
-
- loop->ReplaceInput(1, t); // close loop.
- ind->ReplaceInput(1, add); // close induction variable.
-
- Node* ret = graph.NewNode(common.Return(), ind, start, f);
- Node* end = graph.NewNode(common.End(), ret, f);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(28, &graph);
-}
-
-
-TEST(PhisPushedDownToDifferentBranches) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(2));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
- Node* p1 = graph.NewNode(common.Parameter(1), start);
-
- Node* v1 = graph.NewNode(common.Int32Constant(1));
- Node* v2 = graph.NewNode(common.Int32Constant(2));
- Node* v3 = graph.NewNode(common.Int32Constant(3));
- Node* v4 = graph.NewNode(common.Int32Constant(4));
- Node* br = graph.NewNode(common.Branch(), p0, graph.start());
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
- Node* m = graph.NewNode(common.Merge(2), t, f);
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), v1, v2, m);
- Node* phi2 = graph.NewNode(common.Phi(kMachAnyTagged, 2), v3, v4, m);
-
- Node* br2 = graph.NewNode(common.Branch(), p1, graph.start());
- Node* t2 = graph.NewNode(common.IfTrue(), br2);
- Node* f2 = graph.NewNode(common.IfFalse(), br2);
- Node* m2 = graph.NewNode(common.Merge(2), t2, f2);
- Node* phi3 = graph.NewNode(common.Phi(kMachAnyTagged, 2), phi, phi2, m2);
-
- Node* ret = graph.NewNode(common.Return(), phi3, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- ComputeAndVerifySchedule(24, &graph);
-}
-
-
-TEST(BranchHintTrue) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(1));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
- Node* tv = graph.NewNode(common.Int32Constant(6));
- Node* fv = graph.NewNode(common.Int32Constant(7));
- Node* br = graph.NewNode(common.Branch(BranchHint::kTrue), p0, start);
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
- Node* m = graph.NewNode(common.Merge(2), t, f);
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), tv, fv, m);
- Node* ret = graph.NewNode(common.Return(), phi, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- Schedule* schedule = ComputeAndVerifySchedule(13, &graph);
- // Make sure the false block is marked as deferred.
- CHECK(!schedule->block(t)->deferred());
- CHECK(schedule->block(f)->deferred());
-}
-
-
-TEST(BranchHintFalse) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(1));
- graph.SetStart(start);
-
- Node* p0 = graph.NewNode(common.Parameter(0), start);
- Node* tv = graph.NewNode(common.Int32Constant(6));
- Node* fv = graph.NewNode(common.Int32Constant(7));
- Node* br = graph.NewNode(common.Branch(BranchHint::kFalse), p0, start);
- Node* t = graph.NewNode(common.IfTrue(), br);
- Node* f = graph.NewNode(common.IfFalse(), br);
- Node* m = graph.NewNode(common.Merge(2), t, f);
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), tv, fv, m);
- Node* ret = graph.NewNode(common.Return(), phi, start, start);
- Node* end = graph.NewNode(common.End(), ret, start);
-
- graph.SetEnd(end);
-
- Schedule* schedule = ComputeAndVerifySchedule(13, &graph);
- // Make sure the true block is marked as deferred.
- CHECK(schedule->block(t)->deferred());
- CHECK(!schedule->block(f)->deferred());
-}
-
-
-TEST(ScheduleTerminate) {
- HandleAndZoneScope scope;
- Graph graph(scope.main_zone());
- CommonOperatorBuilder common(scope.main_zone());
-
- Node* start = graph.NewNode(common.Start(1));
- graph.SetStart(start);
-
- Node* loop = graph.NewNode(common.Loop(2), start, start);
- loop->ReplaceInput(1, loop); // self loop, NTL.
-
- Node* effect = graph.NewNode(common.EffectPhi(1), start, loop);
- effect->ReplaceInput(0, effect);
-
- Node* terminate = graph.NewNode(common.Terminate(1), effect, loop);
- Node* end = graph.NewNode(common.End(), terminate);
-
- graph.SetEnd(end);
-
- Schedule* schedule = ComputeAndVerifySchedule(6, &graph);
- BasicBlock* block = schedule->block(loop);
- CHECK_NE(NULL, loop);
- CHECK_EQ(block, schedule->block(effect));
- CHECK_GE(block->rpo_number(), 0);
-}
-
-#endif
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index 147aa323ff..6e2480e51e 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -9,10 +9,11 @@
#include "src/compiler/control-builders.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/source-position.h"
#include "src/compiler/typer.h"
#include "src/compiler/verifier.h"
#include "src/execution.h"
@@ -36,14 +37,17 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
MachineType p3 = kMachNone,
MachineType p4 = kMachNone)
: GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
- typer(this->graph(), MaybeHandle<Context>()),
+ typer(this->isolate(), this->graph(), MaybeHandle<Context>()),
javascript(this->zone()),
- jsgraph(this->graph(), this->common(), &javascript, this->machine()),
- lowering(&jsgraph, this->zone()) {}
+ jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
+ this->machine()),
+ source_positions(jsgraph.graph()),
+ lowering(&jsgraph, this->zone(), &source_positions) {}
Typer typer;
JSOperatorBuilder javascript;
JSGraph jsgraph;
+ SourcePositionTable source_positions;
SimplifiedLowering lowering;
void LowerAllNodes() {
@@ -57,11 +61,7 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
typer.Run();
lowering.LowerAllNodes();
- Zone* zone = this->zone();
- CompilationInfo info(zone->isolate(), zone);
- Linkage linkage(
- zone, Linkage::GetSimplifiedCDescriptor(zone, this->machine_sig_));
- ChangeLowering lowering(&jsgraph, &linkage);
+ ChangeLowering lowering(&jsgraph);
GraphReducer reducer(this->graph(), this->zone());
reducer.AddReducer(&lowering);
reducer.ReduceGraph();
@@ -666,9 +666,9 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
Type* p2_type = Type::None())
: GraphAndBuilders(main_zone()),
- typer(graph(), MaybeHandle<Context>()),
+ typer(main_isolate(), graph(), MaybeHandle<Context>()),
javascript(main_zone()),
- jsgraph(graph(), common(), &javascript, machine()) {
+ jsgraph(main_isolate(), graph(), common(), &javascript, machine()) {
start = graph()->NewNode(common()->Start(2));
graph()->SetStart(start);
ret =
@@ -698,7 +698,10 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
CHECK_EQ(expected, node->opcode());
}
- void Lower() { SimplifiedLowering(&jsgraph, jsgraph.zone()).LowerAllNodes(); }
+ void Lower() {
+ SourcePositionTable table(jsgraph.graph());
+ SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
+ }
// Inserts the node as the return value of the graph.
Node* Return(Node* node) {
@@ -787,31 +790,6 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
};
-TEST(LowerAnyToBoolean_bit_bit) {
- // AnyToBoolean(x: kRepBit) used as kRepBit
- HandleAndZoneScope scope;
- Factory* f = scope.main_zone()->isolate()->factory();
- Handle<Object> zero = f->NewNumber(0);
- Handle<Object> one = f->NewNumber(1);
- Type* singleton_zero = Type::Constant(zero, scope.main_zone());
- Type* singleton_one = Type::Constant(one, scope.main_zone());
- Type* zero_one_range = Type::Range(zero, one, scope.main_zone());
- static Type* kTypes[] = {
- singleton_zero, singleton_one, zero_one_range, Type::Boolean(),
- Type::Union(Type::Boolean(), singleton_zero, scope.main_zone()),
- Type::Union(Type::Boolean(), singleton_one, scope.main_zone()),
- Type::Union(Type::Boolean(), zero_one_range, scope.main_zone())};
- for (Type* type : kTypes) {
- TestingGraph t(type);
- Node* x = t.ExampleWithTypeAndRep(type, kRepBit);
- Node* cnv = t.graph()->NewNode(t.simplified()->AnyToBoolean(), x);
- Node* use = t.Branch(cnv);
- t.Lower();
- CHECK_EQ(x, use->InputAt(0));
- }
-}
-
-
#if V8_TURBOFAN_TARGET
TEST(LowerAnyToBoolean_tagged_tagged) {
@@ -996,11 +974,8 @@ TEST(LowerNumberCmp_to_float64) {
TEST(LowerNumberAddSub_to_int32) {
HandleAndZoneScope scope;
- Factory* f = scope.main_zone()->isolate()->factory();
- Type* small_range =
- Type::Range(f->NewNumber(1), f->NewNumber(10), scope.main_zone());
- Type* large_range =
- Type::Range(f->NewNumber(-1e+13), f->NewNumber(1e+14), scope.main_zone());
+ Type* small_range = Type::Range(1, 10, scope.main_zone());
+ Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
large_range};
@@ -1020,11 +995,8 @@ TEST(LowerNumberAddSub_to_int32) {
TEST(LowerNumberAddSub_to_uint32) {
HandleAndZoneScope scope;
- Factory* f = scope.main_zone()->isolate()->factory();
- Type* small_range =
- Type::Range(f->NewNumber(1), f->NewNumber(10), scope.main_zone());
- Type* large_range =
- Type::Range(f->NewNumber(-1e+13), f->NewNumber(1e+14), scope.main_zone());
+ Type* small_range = Type::Range(1, 10, scope.main_zone());
+ Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
large_range};
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 218a773fa9..208fa437c2 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -44,7 +44,7 @@ class ValueHelper {
void CheckUint32Constant(int32_t expected, Node* node) {
CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
- CHECK_EQ(expected, OpParameter<uint32_t>(node));
+ CHECK_EQ(expected, OpParameter<int32_t>(node));
}
void CheckHeapConstant(Object* expected, Node* node) {
@@ -100,7 +100,7 @@ class ValueHelper {
}
static std::vector<double> float64_vector() {
- static const double nan = v8::base::OS::nan_value();
+ static const double nan = std::numeric_limits<double>::quiet_NaN();
static const double values[] = {
0.125, 0.25, 0.375, 0.5,
1.25, -1.75, 2, 5.125,
@@ -134,7 +134,7 @@ class ValueHelper {
}
static const std::vector<double> nan_vector(size_t limit = 0) {
- static const double nan = v8::base::OS::nan_value();
+ static const double nan = std::numeric_limits<double>::quiet_NaN();
static const double values[] = {-nan, -V8_INFINITY * -0.0,
-V8_INFINITY * 0.0, V8_INFINITY * -0.0,
V8_INFINITY * 0.0, nan};
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 5f452ead07..bbb74c0a71 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -150,20 +150,20 @@ static void XGetter(const Info& info, int offset) {
ApiTestFuzzer::Fuzz();
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
- CHECK_EQ(x_receiver, info.This());
+ CHECK(x_receiver->Equals(info.This()));
info.GetReturnValue().Set(v8_num(x_register[offset]));
}
static void XGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK_EQ(x_holder, info.Holder());
+ CHECK(x_holder->Equals(info.Holder()));
XGetter(info, 0);
}
static void XGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
- CHECK_EQ(x_receiver, info.Holder());
+ CHECK(x_receiver->Equals(info.Holder()));
XGetter(info, 1);
}
@@ -172,8 +172,8 @@ template<class Info>
static void XSetter(Local<Value> value, const Info& info, int offset) {
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
- CHECK_EQ(x_holder, info.This());
- CHECK_EQ(x_holder, info.Holder());
+ CHECK(x_holder->Equals(info.This()));
+ CHECK(x_holder->Equals(info.Holder()));
x_register[offset] = value->Int32Value();
info.GetReturnValue().Set(v8_num(-1));
}
@@ -222,10 +222,10 @@ THREADED_TEST(AccessorIC) {
" result.push(obj[key_1]);"
"}"
"result"));
- CHECK_EQ(80, array->Length());
+ CHECK_EQ(80u, array->Length());
for (int i = 0; i < 80; i++) {
v8::Handle<Value> entry = array->Get(v8::Integer::New(isolate, i));
- CHECK_EQ(v8::Integer::New(isolate, i/2), entry);
+ CHECK(v8::Integer::New(isolate, i / 2)->Equals(entry));
}
}
@@ -407,7 +407,7 @@ THREADED_TEST(Regress1054726) {
"for (var i = 0; i < 5; i++) {"
" try { obj.x; } catch (e) { result += e; }"
"}; result"))->Run();
- CHECK_EQ(v8_str("ggggg"), result);
+ CHECK(v8_str("ggggg")->Equals(result));
result = Script::Compile(String::NewFromUtf8(
isolate,
@@ -415,7 +415,7 @@ THREADED_TEST(Regress1054726) {
"for (var i = 0; i < 5; i++) {"
" try { obj.x = i; } catch (e) { result += e; }"
"}; result"))->Run();
- CHECK_EQ(v8_str("01234"), result);
+ CHECK(v8_str("01234")->Equals(result));
}
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index 2e071acc73..79ba4a486e 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -152,8 +152,8 @@ TEST(StressJS) {
Handle<AccessorInfo> foreign = TestAccessorInfo(isolate, attrs);
Map::EnsureDescriptorSlack(map, 1);
- CallbacksDescriptor d(Handle<Name>(Name::cast(foreign->name())),
- foreign, attrs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(foreign->name())),
+ foreign, attrs);
map->AppendDescriptor(&d);
// Add the Foo constructor the global object.
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
new file mode 100644
index 0000000000..a2acb24d76
--- /dev/null
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -0,0 +1,3110 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "test/cctest/test-api.h"
+
+#include "include/v8-util.h"
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/base/platform/platform.h"
+#include "src/compilation-cache.h"
+#include "src/execution.h"
+#include "src/objects.h"
+#include "src/parser.h"
+#include "src/smart-pointers.h"
+#include "src/snapshot.h"
+#include "src/unicode-inl.h"
+#include "src/utils.h"
+#include "src/vm-state.h"
+
+using ::v8::Boolean;
+using ::v8::BooleanObject;
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Name;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::Symbol;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::UniqueId;
+using ::v8::V8;
+using ::v8::Value;
+
+
+namespace {
+
+void Returns42(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(42);
+}
+
+void Return239Callback(Local<String> name,
+ const v8::PropertyCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info, FUNCTION_ADDR(Return239Callback));
+ info.GetReturnValue().Set(v8_str("bad value"));
+ info.GetReturnValue().Set(v8_num(239));
+}
+
+
+void EmptyInterceptorGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {}
+
+
+void EmptyInterceptorSetter(Local<Name> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {}
+
+
+void SimpleAccessorGetter(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ Handle<Object> self = Handle<Object>::Cast(info.This());
+ info.GetReturnValue().Set(
+ self->Get(String::Concat(v8_str("accessor_"), name)));
+}
+
+void SimpleAccessorSetter(Local<String> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ Handle<Object> self = Handle<Object>::Cast(info.This());
+ self->Set(String::Concat(v8_str("accessor_"), name), value);
+}
+
+
+void SymbolAccessorGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(name->IsSymbol());
+ Local<Symbol> sym = Local<Symbol>::Cast(name);
+ if (sym->Name()->IsUndefined()) return;
+ SimpleAccessorGetter(Local<String>::Cast(sym->Name()), info);
+}
+
+void SymbolAccessorSetter(Local<Name> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ CHECK(name->IsSymbol());
+ Local<Symbol> sym = Local<Symbol>::Cast(name);
+ if (sym->Name()->IsUndefined()) return;
+ SimpleAccessorSetter(Local<String>::Cast(sym->Name()), value, info);
+}
+
+void StringInterceptorGetter(
+ Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>&
+ info) { // Intercept names that start with 'interceptor_'.
+ String::Utf8Value utf8(name);
+ char* name_str = *utf8;
+ char prefix[] = "interceptor_";
+ int i;
+ for (i = 0; name_str[i] && prefix[i]; ++i) {
+ if (name_str[i] != prefix[i]) return;
+ }
+ Handle<Object> self = Handle<Object>::Cast(info.This());
+ info.GetReturnValue().Set(self->GetHiddenValue(v8_str(name_str + i)));
+}
+
+
+void StringInterceptorSetter(Local<String> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // Intercept accesses that set certain integer values, for which the name does
+ // not start with 'accessor_'.
+ String::Utf8Value utf8(name);
+ char* name_str = *utf8;
+ char prefix[] = "accessor_";
+ int i;
+ for (i = 0; name_str[i] && prefix[i]; ++i) {
+ if (name_str[i] != prefix[i]) break;
+ }
+ if (!prefix[i]) return;
+
+ if (value->IsInt32() && value->Int32Value() < 10000) {
+ Handle<Object> self = Handle<Object>::Cast(info.This());
+ self->SetHiddenValue(name, value);
+ info.GetReturnValue().Set(value);
+ }
+}
+
+void InterceptorGetter(Local<Name> generic_name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ if (generic_name->IsSymbol()) return;
+ StringInterceptorGetter(Local<String>::Cast(generic_name), info);
+}
+
+void InterceptorSetter(Local<Name> generic_name, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ if (generic_name->IsSymbol()) return;
+ StringInterceptorSetter(Local<String>::Cast(generic_name), value, info);
+}
+
+void GenericInterceptorGetter(Local<Name> generic_name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ Local<String> str;
+ if (generic_name->IsSymbol()) {
+ Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
+ if (name->IsUndefined()) return;
+ str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
+ } else {
+ Local<String> name = Local<String>::Cast(generic_name);
+ String::Utf8Value utf8(name);
+ char* name_str = *utf8;
+ if (*name_str == '_') return;
+ str = String::Concat(v8_str("_str_"), name);
+ }
+
+ Handle<Object> self = Handle<Object>::Cast(info.This());
+ info.GetReturnValue().Set(self->Get(str));
+}
+
+void GenericInterceptorSetter(Local<Name> generic_name, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ Local<String> str;
+ if (generic_name->IsSymbol()) {
+ Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
+ if (name->IsUndefined()) return;
+ str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
+ } else {
+ Local<String> name = Local<String>::Cast(generic_name);
+ String::Utf8Value utf8(name);
+ char* name_str = *utf8;
+ if (*name_str == '_') return;
+ str = String::Concat(v8_str("_str_"), name);
+ }
+
+ Handle<Object> self = Handle<Object>::Cast(info.This());
+ self->Set(str, value);
+ info.GetReturnValue().Set(value);
+}
+
+void AddAccessor(Handle<FunctionTemplate> templ, Handle<String> name,
+ v8::AccessorGetterCallback getter,
+ v8::AccessorSetterCallback setter) {
+ templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
+}
+
+void AddInterceptor(Handle<FunctionTemplate> templ,
+ v8::NamedPropertyGetterCallback getter,
+ v8::NamedPropertySetterCallback setter) {
+ templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
+}
+
+
+void AddAccessor(Handle<FunctionTemplate> templ, Handle<Name> name,
+ v8::AccessorNameGetterCallback getter,
+ v8::AccessorNameSetterCallback setter) {
+ templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
+}
+
+void AddInterceptor(Handle<FunctionTemplate> templ,
+ v8::GenericNamedPropertyGetterCallback getter,
+ v8::GenericNamedPropertySetterCallback setter) {
+ templ->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(getter, setter));
+}
+
+
+v8::Handle<v8::Object> bottom;
+
+void CheckThisIndexedPropertyHandler(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyHandler));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+void CheckThisNamedPropertyHandler(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyHandler));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+void CheckThisIndexedPropertySetter(
+ uint32_t index, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertySetter));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+
+void CheckThisNamedPropertySetter(
+ Local<Name> property, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertySetter));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+void CheckThisIndexedPropertyQuery(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyQuery));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+
+void CheckThisNamedPropertyQuery(
+ Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyQuery));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+
+void CheckThisIndexedPropertyDeleter(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyDeleter));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+
+void CheckThisNamedPropertyDeleter(
+ Local<Name> property, const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyDeleter));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+
+void CheckThisIndexedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyEnumerator));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+
+void CheckThisNamedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyEnumerator));
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.This()->Equals(bottom));
+}
+
+
+int echo_named_call_count;
+
+
+void EchoNamedProperty(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(v8_str("data")->Equals(info.Data()));
+ echo_named_call_count++;
+ info.GetReturnValue().Set(name);
+}
+
+void InterceptorHasOwnPropertyGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+}
+
+void InterceptorHasOwnPropertyGetterGC(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+}
+
+} // namespace
+
+
+THREADED_TEST(InterceptorHasOwnProperty) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
+ Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
+ instance_templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetter));
+ Local<Function> function = fun_templ->GetFunction();
+ context->Global()->Set(v8_str("constructor"), function);
+ v8::Handle<Value> value = CompileRun(
+ "var o = new constructor();"
+ "o.hasOwnProperty('ostehaps');");
+ CHECK_EQ(false, value->BooleanValue());
+ value = CompileRun(
+ "o.ostehaps = 42;"
+ "o.hasOwnProperty('ostehaps');");
+ CHECK_EQ(true, value->BooleanValue());
+ value = CompileRun(
+ "var p = new constructor();"
+ "p.hasOwnProperty('ostehaps');");
+ CHECK_EQ(false, value->BooleanValue());
+}
+
+
+THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
+ Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
+ instance_templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetterGC));
+ Local<Function> function = fun_templ->GetFunction();
+ context->Global()->Set(v8_str("constructor"), function);
+ // Let's first make some stuff so we can be sure to get a good GC.
+ CompileRun(
+ "function makestr(size) {"
+ " switch (size) {"
+ " case 1: return 'f';"
+ " case 2: return 'fo';"
+ " case 3: return 'foo';"
+ " }"
+ " return makestr(size >> 1) + makestr((size + 1) >> 1);"
+ "}"
+ "var x = makestr(12345);"
+ "x = makestr(31415);"
+ "x = makestr(23456);");
+ v8::Handle<Value> value = CompileRun(
+ "var o = new constructor();"
+ "o.__proto__ = new String(x);"
+ "o.hasOwnProperty('ostehaps');");
+ CHECK_EQ(false, value->BooleanValue());
+}
+
+
+static void CheckInterceptorLoadIC(
+ v8::GenericNamedPropertyGetterCallback getter, const char* source,
+ int expected) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(getter, 0, 0, 0, 0,
+ v8_str("data")));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(source);
+ CHECK_EQ(expected, value->Int32Value());
+}
+
+
+static void InterceptorLoadICGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = CcTest::isolate();
+ CHECK_EQ(isolate, info.GetIsolate());
+ CHECK(v8_str("data")->Equals(info.Data()));
+ CHECK(v8_str("x")->Equals(name));
+ info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
+}
+
+
+// This test should hit the load IC for the interceptor case.
+THREADED_TEST(InterceptorLoadIC) {
+ CheckInterceptorLoadIC(InterceptorLoadICGetter,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.x;"
+ "}",
+ 42);
+}
+
+
+// Below go several tests which verify that JITing for various
+// configurations of interceptor and explicit fields works fine
+// (those cases are special cased to get better performance).
+
+static void InterceptorLoadXICGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ info.GetReturnValue().Set(
+ v8_str("x")->Equals(name)
+ ? v8::Handle<v8::Value>(v8::Integer::New(info.GetIsolate(), 42))
+ : v8::Handle<v8::Value>());
+}
+
+
+THREADED_TEST(InterceptorLoadICWithFieldOnHolder) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "o.y = 239;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.y;"
+ "}",
+ 239);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithSubstitutedProto) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "o.__proto__ = { 'y': 239 };"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.y + o.x;"
+ "}",
+ 239 + 42);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithPropertyOnProto) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "o.__proto__.y = 239;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.y + o.x;"
+ "}",
+ 239 + 42);
+}
+
+
+THREADED_TEST(InterceptorLoadICUndefined) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = (o.y == undefined) ? 239 : 42;"
+ "}",
+ 239);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithOverride) {
+ CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+ "fst = new Object(); fst.__proto__ = o;"
+ "snd = new Object(); snd.__proto__ = fst;"
+ "var result1 = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result1 = snd.x;"
+ "}"
+ "fst.x = 239;"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = snd.x;"
+ "}"
+ "result + result1",
+ 239 + 42);
+}
+
+
+// Test the case when we stored field into
+// a stub, but interceptor produced value on its own.
+THREADED_TEST(InterceptorLoadICFieldNotNeeded) {
+ CheckInterceptorLoadIC(
+ InterceptorLoadXICGetter,
+ "proto = new Object();"
+ "o.__proto__ = proto;"
+ "proto.x = 239;"
+ "for (var i = 0; i < 1000; i++) {"
+ " o.x;"
+ // Now it should be ICed and keep a reference to x defined on proto
+ "}"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result += o.x;"
+ "}"
+ "result;",
+ 42 * 1000);
+}
+
+
+// Test the case when we stored field into
+// a stub, but it got invalidated later on.
+THREADED_TEST(InterceptorLoadICInvalidatedField) {
+ CheckInterceptorLoadIC(
+ InterceptorLoadXICGetter,
+ "proto1 = new Object();"
+ "proto2 = new Object();"
+ "o.__proto__ = proto1;"
+ "proto1.__proto__ = proto2;"
+ "proto2.y = 239;"
+ "for (var i = 0; i < 1000; i++) {"
+ " o.y;"
+ // Now it should be ICed and keep a reference to y defined on proto2
+ "}"
+ "proto1.y = 42;"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result += o.y;"
+ "}"
+ "result;",
+ 42 * 1000);
+}
+
+
+static int interceptor_load_not_handled_calls = 0;
+static void InterceptorLoadNotHandled(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ++interceptor_load_not_handled_calls;
+}
+
+
+// Test how post-interceptor lookups are done in the non-cacheable
+// case: the interceptor should not be invoked during this lookup.
+THREADED_TEST(InterceptorLoadICPostInterceptor) {
+ interceptor_load_not_handled_calls = 0;
+ CheckInterceptorLoadIC(InterceptorLoadNotHandled,
+ "receiver = new Object();"
+ "receiver.__proto__ = o;"
+ "proto = new Object();"
+ "/* Make proto a slow-case object. */"
+ "for (var i = 0; i < 1000; i++) {"
+ " proto[\"xxxxxxxx\" + i] = [];"
+ "}"
+ "proto.x = 17;"
+ "o.__proto__ = proto;"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result += receiver.x;"
+ "}"
+ "result;",
+ 17 * 1000);
+ CHECK_EQ(1000, interceptor_load_not_handled_calls);
+}
+
+
+// Test the case when we stored field into
+// a stub, but it got invalidated later on due to override on
+// global object which is between interceptor and fields' holders.
+THREADED_TEST(InterceptorLoadICInvalidatedFieldViaGlobal) {
+ CheckInterceptorLoadIC(
+ InterceptorLoadXICGetter,
+ "o.__proto__ = this;" // set a global to be a proto of o.
+ "this.__proto__.y = 239;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (o.y != 239) throw 'oops: ' + o.y;"
+ // Now it should be ICed and keep a reference to y defined on
+ // field_holder.
+ "}"
+ "this.y = 42;" // Assign on a global.
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " result += o.y;"
+ "}"
+ "result;",
+ 42 * 10);
+}
+
+
+static void SetOnThis(Local<String> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ Local<Object>::Cast(info.This())->ForceSet(name, value);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ templ->SetAccessor(v8_str("y"), Return239Callback);
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+
+ // Check the case when receiver and interceptor's holder
+ // are the same objects.
+ v8::Handle<Value> value = CompileRun(
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = o.y;"
+ "}");
+ CHECK_EQ(239, value->Int32Value());
+
+ // Check the case when interceptor's holder is in proto chain
+ // of receiver.
+ value = CompileRun(
+ "r = { __proto__: o };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = r.y;"
+ "}");
+ CHECK_EQ(239, value->Int32Value());
+}
+
+
+THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ templ_p->SetAccessor(v8_str("y"), Return239Callback);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+ context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+ // Check the case when receiver and interceptor's holder
+ // are the same objects.
+ v8::Handle<Value> value = CompileRun(
+ "o.__proto__ = p;"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = o.x + o.y;"
+ "}");
+ CHECK_EQ(239 + 42, value->Int32Value());
+
+ // Check the case when interceptor's holder is in proto chain
+ // of receiver.
+ value = CompileRun(
+ "r = { __proto__: o };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = r.x + r.y;"
+ "}");
+ CHECK_EQ(239 + 42, value->Int32Value());
+}
+
+
+THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ templ->SetAccessor(v8_str("y"), Return239Callback);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "fst = new Object(); fst.__proto__ = o;"
+ "snd = new Object(); snd.__proto__ = fst;"
+ "var result1 = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result1 = snd.x;"
+ "}"
+ "fst.x = 239;"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = snd.x;"
+ "}"
+ "result + result1");
+ CHECK_EQ(239 + 42, value->Int32Value());
+}
+
+
+// Test the case when we stored callback into
+// a stub, but interceptor produced value on its own.
+THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ templ_p->SetAccessor(v8_str("y"), Return239Callback);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+ context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "o.__proto__ = p;"
+ "for (var i = 0; i < 7; i++) {"
+ " o.x;"
+ // Now it should be ICed and keep a reference to x defined on p
+ "}"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result += o.x;"
+ "}"
+ "result");
+ CHECK_EQ(42 * 7, value->Int32Value());
+}
+
+
+// Test the case when we stored callback into
+// a stub, but it got invalidated later on.
+THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+ context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "inbetween = new Object();"
+ "o.__proto__ = inbetween;"
+ "inbetween.__proto__ = p;"
+ "for (var i = 0; i < 10; i++) {"
+ " o.y;"
+ // Now it should be ICed and keep a reference to y defined on p
+ "}"
+ "inbetween.y = 42;"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " result += o.y;"
+ "}"
+ "result");
+ CHECK_EQ(42 * 10, value->Int32Value());
+}
+
+
+// Test the case when we stored callback into
+// a stub, but it got invalidated later on due to override on
+// global object which is between interceptor and callbacks' holders.
+THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+ context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "o.__proto__ = this;"
+ "this.__proto__ = p;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (o.y != 239) throw 'oops: ' + o.y;"
+ // Now it should be ICed and keep a reference to y defined on p
+ "}"
+ "this.y = 42;"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " result += o.y;"
+ "}"
+ "result");
+ CHECK_EQ(42 * 10, value->Int32Value());
+}
+
+
+static void InterceptorLoadICGetter0(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(v8_str("x")->Equals(name));
+ info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), 0));
+}
+
+
+THREADED_TEST(InterceptorReturningZero) {
+ CheckInterceptorLoadIC(InterceptorLoadICGetter0, "o.x == undefined ? 1 : 0",
+ 0);
+}
+
+
+static void InterceptorStoreICSetter(
+ Local<Name> key, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(v8_str("x")->Equals(key));
+ CHECK_EQ(42, value->Int32Value());
+ info.GetReturnValue().Set(value);
+}
+
+
+// This test should hit the store IC for the interceptor case.
+THREADED_TEST(InterceptorStoreIC) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ InterceptorLoadICGetter, InterceptorStoreICSetter, 0, 0, 0,
+ v8_str("data")));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ CompileRun(
+ "for (var i = 0; i < 1000; i++) {"
+ " o.x = 42;"
+ "}");
+}
+
+
+THREADED_TEST(InterceptorStoreICWithNoSetter) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "for (var i = 0; i < 1000; i++) {"
+ " o.y = 239;"
+ "}"
+ "42 + o.y");
+ CHECK_EQ(239 + 42, value->Int32Value());
+}
+
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ child->Inherit(parent);
+ AddAccessor(parent, v8_str("age"), SimpleAccessorGetter,
+ SimpleAccessorSetter);
+ AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "child.age = 10;");
+ ExpectBoolean("child.hasOwnProperty('age')", false);
+ ExpectInt32("child.age", 10);
+ ExpectInt32("child.accessor_age", 10);
+}
+
+
+THREADED_TEST(LegacyInterceptorDoesNotSeeSymbols) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
+
+ child->Inherit(parent);
+ AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
+ AddInterceptor(child, StringInterceptorGetter, StringInterceptorSetter);
+
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()->Set(v8_str("age"), age);
+ CompileRun(
+ "var child = new Child;"
+ "child[age] = 10;");
+ ExpectInt32("child[age]", 10);
+ ExpectBoolean("child.hasOwnProperty('age')", false);
+ ExpectBoolean("child.hasOwnProperty('accessor_age')", true);
+}
+
+
+THREADED_TEST(GenericInterceptorDoesSeeSymbols) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
+ v8::Local<v8::Symbol> anon = v8::Symbol::New(isolate);
+
+ child->Inherit(parent);
+ AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
+ AddInterceptor(child, GenericInterceptorGetter, GenericInterceptorSetter);
+
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()->Set(v8_str("age"), age);
+ env->Global()->Set(v8_str("anon"), anon);
+ CompileRun(
+ "var child = new Child;"
+ "child[age] = 10;");
+ ExpectInt32("child[age]", 10);
+ ExpectInt32("child._sym_age", 10);
+
+ // Check that it also sees strings.
+ CompileRun("child.foo = 47");
+ ExpectInt32("child.foo", 47);
+ ExpectInt32("child._str_foo", 47);
+
+ // Check that the interceptor can punt (in this case, on anonymous symbols).
+ CompileRun("child[anon] = 31337");
+ ExpectInt32("child[anon]", 31337);
+}
+
+
+THREADED_TEST(NamedPropertyHandlerGetter) {
+ echo_named_call_count = 0;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Handle<v8::FunctionTemplate> templ =
+ v8::FunctionTemplate::New(CcTest::isolate());
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ EchoNamedProperty, 0, 0, 0, 0, v8_str("data")));
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"), templ->GetFunction()->NewInstance());
+ CHECK_EQ(echo_named_call_count, 0);
+ v8_compile("obj.x")->Run();
+ CHECK_EQ(echo_named_call_count, 1);
+ const char* code = "var str = 'oddle'; obj[str] + obj.poddle;";
+ v8::Handle<Value> str = CompileRun(code);
+ String::Utf8Value value(str);
+ CHECK_EQ(0, strcmp(*value, "oddlepoddle"));
+ // Check default behavior
+ CHECK_EQ(10, v8_compile("obj.flob = 10;")->Run()->Int32Value());
+ CHECK(v8_compile("'myProperty' in obj")->Run()->BooleanValue());
+ CHECK(v8_compile("delete obj.myProperty")->Run()->BooleanValue());
+}
+
+
+int echo_indexed_call_count = 0;
+
+
+static void EchoIndexedProperty(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(v8_num(637)->Equals(info.Data()));
+ echo_indexed_call_count++;
+ info.GetReturnValue().Set(v8_num(index));
+}
+
+
+THREADED_TEST(IndexedPropertyHandlerGetter) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ EchoIndexedProperty, 0, 0, 0, 0, v8_num(637)));
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"), templ->GetFunction()->NewInstance());
+ Local<Script> script = v8_compile("obj[900]");
+ CHECK_EQ(script->Run()->Int32Value(), 900);
+}
+
+
+THREADED_TEST(PropertyHandlerInPrototype) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Set up a prototype chain with three interceptors.
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ CheckThisIndexedPropertyHandler, CheckThisIndexedPropertySetter,
+ CheckThisIndexedPropertyQuery, CheckThisIndexedPropertyDeleter,
+ CheckThisIndexedPropertyEnumerator));
+
+ templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ CheckThisNamedPropertyHandler, CheckThisNamedPropertySetter,
+ CheckThisNamedPropertyQuery, CheckThisNamedPropertyDeleter,
+ CheckThisNamedPropertyEnumerator));
+
+ bottom = templ->GetFunction()->NewInstance();
+ Local<v8::Object> top = templ->GetFunction()->NewInstance();
+ Local<v8::Object> middle = templ->GetFunction()->NewInstance();
+
+ bottom->SetPrototype(middle);
+ middle->SetPrototype(top);
+ env->Global()->Set(v8_str("obj"), bottom);
+
+ // Indexed and named get.
+ CompileRun("obj[0]");
+ CompileRun("obj.x");
+
+ // Indexed and named set.
+ CompileRun("obj[1] = 42");
+ CompileRun("obj.y = 42");
+
+ // Indexed and named query.
+ CompileRun("0 in obj");
+ CompileRun("'x' in obj");
+
+ // Indexed and named deleter.
+ CompileRun("delete obj[0]");
+ CompileRun("delete obj.x");
+
+ // Enumerators.
+ CompileRun("for (var p in obj) ;");
+}
+
+
+static void PrePropertyHandlerGet(
+ Local<Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (v8_str("pre")->Equals(key)) {
+ info.GetReturnValue().Set(v8_str("PrePropertyHandler: pre"));
+ }
+}
+
+
+static void PrePropertyHandlerQuery(
+ Local<Name> key, const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ if (v8_str("pre")->Equals(key)) {
+ info.GetReturnValue().Set(static_cast<int32_t>(v8::None));
+ }
+}
+
+
+THREADED_TEST(PrePropertyHandler) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
+ desc->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ PrePropertyHandlerGet, 0, PrePropertyHandlerQuery));
+ LocalContext env(NULL, desc->InstanceTemplate());
+ CompileRun("var pre = 'Object: pre'; var on = 'Object: on';");
+ v8::Handle<Value> result_pre = CompileRun("pre");
+ CHECK(v8_str("PrePropertyHandler: pre")->Equals(result_pre));
+ v8::Handle<Value> result_on = CompileRun("on");
+ CHECK(v8_str("Object: on")->Equals(result_on));
+ v8::Handle<Value> result_post = CompileRun("post");
+ CHECK(result_post.IsEmpty());
+}
+
+
+THREADED_TEST(EmptyInterceptorBreakTransitions) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Constructor"), templ->GetFunction());
+ CompileRun(
+ "var o1 = new Constructor;"
+ "o1.a = 1;" // Ensure a and x share the descriptor array.
+ "Object.defineProperty(o1, 'x', {value: 10});");
+ CompileRun(
+ "var o2 = new Constructor;"
+ "o2.a = 1;"
+ "Object.defineProperty(o2, 'x', {value: 10});");
+}
+
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ child->Inherit(parent);
+ AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "var parent = child.__proto__;"
+ "Object.defineProperty(parent, 'age', "
+ " {get: function(){ return this.accessor_age; }, "
+ " set: function(v){ this.accessor_age = v; }, "
+ " enumerable: true, configurable: true});"
+ "child.age = 10;");
+ ExpectBoolean("child.hasOwnProperty('age')", false);
+ ExpectInt32("child.age", 10);
+ ExpectInt32("child.accessor_age", 10);
+}
+
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowApiAccessors) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ auto returns_42 = FunctionTemplate::New(isolate, Returns42);
+ parent->PrototypeTemplate()->SetAccessorProperty(v8_str("age"), returns_42);
+ Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ child->Inherit(parent);
+ AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "var parent = child.__proto__;");
+ ExpectBoolean("child.hasOwnProperty('age')", false);
+ ExpectInt32("child.age", 42);
+ // Check interceptor followup.
+ ExpectInt32(
+ "var result;"
+ "for (var i = 0; i < 4; ++i) {"
+ " result = child.age;"
+ "}"
+ "result",
+ 42);
+}
+
+
+THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ child->Inherit(parent);
+ AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "var parent = child.__proto__;"
+ "parent.name = 'Alice';");
+ ExpectBoolean("child.hasOwnProperty('name')", false);
+ ExpectString("child.name", "Alice");
+ CompileRun("child.name = 'Bob';");
+ ExpectString("child.name", "Bob");
+ ExpectBoolean("child.hasOwnProperty('name')", true);
+ ExpectString("parent.name", "Alice");
+}
+
+
+THREADED_TEST(SwitchFromInterceptorToAccessor) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddAccessor(templ, v8_str("age"), SimpleAccessorGetter, SimpleAccessorSetter);
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ "function setAge(i){ obj.age = i; };"
+ "for(var i = 0; i <= 10000; i++) setAge(i);");
+ // All i < 10000 go to the interceptor.
+ ExpectInt32("obj.interceptor_age", 9999);
+ // The last i goes to the accessor.
+ ExpectInt32("obj.accessor_age", 10000);
+}
+
+
+THREADED_TEST(SwitchFromAccessorToInterceptor) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddAccessor(templ, v8_str("age"), SimpleAccessorGetter, SimpleAccessorSetter);
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ "function setAge(i){ obj.age = i; };"
+ "for(var i = 20000; i >= 9999; i--) setAge(i);");
+ // All i >= 10000 go to the accessor.
+ ExpectInt32("obj.accessor_age", 10000);
+ // The last i goes to the interceptor.
+ ExpectInt32("obj.interceptor_age", 9999);
+}
+
+
+THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ child->Inherit(parent);
+ AddAccessor(parent, v8_str("age"), SimpleAccessorGetter,
+ SimpleAccessorSetter);
+ AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "function setAge(i){ child.age = i; };"
+ "for(var i = 0; i <= 10000; i++) setAge(i);");
+ // All i < 10000 go to the interceptor.
+ ExpectInt32("child.interceptor_age", 9999);
+ // The last i goes to the accessor.
+ ExpectInt32("child.accessor_age", 10000);
+}
+
+
+THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ child->Inherit(parent);
+ AddAccessor(parent, v8_str("age"), SimpleAccessorGetter,
+ SimpleAccessorSetter);
+ AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "function setAge(i){ child.age = i; };"
+ "for(var i = 20000; i >= 9999; i--) setAge(i);");
+ // All i >= 10000 go to the accessor.
+ ExpectInt32("child.accessor_age", 10000);
+ // The last i goes to the interceptor.
+ ExpectInt32("child.interceptor_age", 9999);
+}
+
+
+THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ "function setter(i) { this.accessor_age = i; };"
+ "function getter() { return this.accessor_age; };"
+ "function setAge(i) { obj.age = i; };"
+ "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
+ "for(var i = 0; i <= 10000; i++) setAge(i);");
+ // All i < 10000 go to the interceptor.
+ ExpectInt32("obj.interceptor_age", 9999);
+ // The last i goes to the JavaScript accessor.
+ ExpectInt32("obj.accessor_age", 10000);
+ // The installed JavaScript getter is still intact.
+ // This last part is a regression test for issue 1651 and relies on the fact
+ // that both interceptor and accessor are being installed on the same object.
+ ExpectInt32("obj.age", 10000);
+ ExpectBoolean("obj.hasOwnProperty('age')", true);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
+}
+
+
+THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ "function setter(i) { this.accessor_age = i; };"
+ "function getter() { return this.accessor_age; };"
+ "function setAge(i) { obj.age = i; };"
+ "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
+ "for(var i = 20000; i >= 9999; i--) setAge(i);");
+ // All i >= 10000 go to the accessor.
+ ExpectInt32("obj.accessor_age", 10000);
+ // The last i goes to the interceptor.
+ ExpectInt32("obj.interceptor_age", 9999);
+ // The installed JavaScript getter is still intact.
+ // This last part is a regression test for issue 1651 and relies on the fact
+ // that both interceptor and accessor are being installed on the same object.
+ ExpectInt32("obj.age", 10000);
+ ExpectBoolean("obj.hasOwnProperty('age')", true);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
+}
+
+
+THREADED_TEST(SwitchFromInterceptorToProperty) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ child->Inherit(parent);
+ AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "function setAge(i){ child.age = i; };"
+ "for(var i = 0; i <= 10000; i++) setAge(i);");
+ // All i < 10000 go to the interceptor.
+ ExpectInt32("child.interceptor_age", 9999);
+ // The last i goes to child's own property.
+ ExpectInt32("child.age", 10000);
+}
+
+
+THREADED_TEST(SwitchFromPropertyToInterceptor) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ child->Inherit(parent);
+ AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CompileRun(
+ "var child = new Child;"
+ "function setAge(i){ child.age = i; };"
+ "for(var i = 20000; i >= 9999; i--) setAge(i);");
+ // All i >= 10000 go to child's own property.
+ ExpectInt32("child.age", 10000);
+ // The last i goes to the interceptor.
+ ExpectInt32("child.interceptor_age", 9999);
+}
+
+
+static bool interceptor_for_hidden_properties_called;
+static void InterceptorForHiddenProperties(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ interceptor_for_hidden_properties_called = true;
+}
+
+
+THREADED_TEST(HiddenPropertiesWithInterceptors) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ interceptor_for_hidden_properties_called = false;
+
+ v8::Local<v8::String> key = v8_str("api-test::hidden-key");
+
+ // Associate an interceptor with an object and start setting hidden values.
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
+ Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
+ instance_templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorForHiddenProperties));
+ Local<v8::Function> function = fun_templ->GetFunction();
+ Local<v8::Object> obj = function->NewInstance();
+ CHECK(obj->SetHiddenValue(key, v8::Integer::New(isolate, 2302)));
+ CHECK_EQ(2302, obj->GetHiddenValue(key)->Int32Value());
+ CHECK(!interceptor_for_hidden_properties_called);
+}
+
+
+static void XPropertyGetter(Local<Name> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(info.Data()->IsUndefined());
+ info.GetReturnValue().Set(property);
+}
+
+
+THREADED_TEST(NamedInterceptorPropertyRead) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ Local<Script> script = v8_compile("obj.x");
+ for (int i = 0; i < 10; i++) {
+ Local<Value> result = script->Run();
+ CHECK(result->Equals(v8_str("x")));
+ }
+}
+
+
+THREADED_TEST(NamedInterceptorDictionaryIC) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
+ LocalContext context;
+ // Create an object with a named interceptor.
+ context->Global()->Set(v8_str("interceptor_obj"), templ->NewInstance());
+ Local<Script> script = v8_compile("interceptor_obj.x");
+ for (int i = 0; i < 10; i++) {
+ Local<Value> result = script->Run();
+ CHECK(result->Equals(v8_str("x")));
+ }
+ // Create a slow case object and a function accessing a property in
+ // that slow case object (with dictionary probing in generated
+ // code). Then force object with a named interceptor into slow-case,
+ // pass it to the function, and check that the interceptor is called
+ // instead of accessing the local property.
+ Local<Value> result = CompileRun(
+ "function get_x(o) { return o.x; };"
+ "var obj = { x : 42, y : 0 };"
+ "delete obj.y;"
+ "for (var i = 0; i < 10; i++) get_x(obj);"
+ "interceptor_obj.x = 42;"
+ "interceptor_obj.y = 10;"
+ "delete interceptor_obj.y;"
+ "get_x(interceptor_obj)");
+ CHECK(result->Equals(v8_str("x")));
+}
+
+
+THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> context1 = Context::New(isolate);
+
+ context1->Enter();
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
+ // Create an object with a named interceptor.
+ v8::Local<v8::Object> object = templ->NewInstance();
+ context1->Global()->Set(v8_str("interceptor_obj"), object);
+
+ // Force the object into the slow case.
+ CompileRun(
+ "interceptor_obj.y = 0;"
+ "delete interceptor_obj.y;");
+ context1->Exit();
+
+ {
+ // Introduce the object into a different context.
+ // Repeat named loads to exercise ICs.
+ LocalContext context2;
+ context2->Global()->Set(v8_str("interceptor_obj"), object);
+ Local<Value> result = CompileRun(
+ "function get_x(o) { return o.x; }"
+ "interceptor_obj.x = 42;"
+ "for (var i=0; i != 10; i++) {"
+ " get_x(interceptor_obj);"
+ "}"
+ "get_x(interceptor_obj)");
+ // Check that the interceptor was actually invoked.
+ CHECK(result->Equals(v8_str("x")));
+ }
+
+ // Return to the original context and force some object to the slow case
+ // to cause the NormalizedMapCache to verify.
+ context1->Enter();
+ CompileRun("var obj = { x : 0 }; delete obj.x;");
+ context1->Exit();
+}
+
+
+static void SetXOnPrototypeGetter(
+ Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ // Set x on the prototype object and do not handle the get request.
+ v8::Handle<v8::Value> proto = info.Holder()->GetPrototype();
+ proto.As<v8::Object>()->Set(v8_str("x"),
+ v8::Integer::New(info.GetIsolate(), 23));
+}
+
+
+// This is a regression test for http://crbug.com/20104. Map
+// transitions should not interfere with post interceptor lookup.
+THREADED_TEST(NamedInterceptorMapTransitionRead) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> function_template =
+ v8::FunctionTemplate::New(isolate);
+ Local<v8::ObjectTemplate> instance_template =
+ function_template->InstanceTemplate();
+ instance_template->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(SetXOnPrototypeGetter));
+ LocalContext context;
+ context->Global()->Set(v8_str("F"), function_template->GetFunction());
+ // Create an instance of F and introduce a map transition for x.
+ CompileRun("var o = new F(); o.x = 23;");
+ // Create an instance of F and invoke the getter. The result should be 23.
+ Local<Value> result = CompileRun("o = new F(); o.x");
+ CHECK_EQ(result->Int32Value(), 23);
+}
+
+
+static void IndexedPropertyGetter(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index == 37) {
+ info.GetReturnValue().Set(v8_num(625));
+ }
+}
+
+
+static void IndexedPropertySetter(
+ uint32_t index, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index == 39) {
+ info.GetReturnValue().Set(value);
+ }
+}
+
+
+THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ IndexedPropertyGetter, IndexedPropertySetter));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ Local<Script> getter_script =
+ v8_compile("obj.__defineGetter__(\"3\", function(){return 5;});obj[3];");
+ Local<Script> setter_script = v8_compile(
+ "obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
+ "obj[17] = 23;"
+ "obj.foo;");
+ Local<Script> interceptor_setter_script = v8_compile(
+ "obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
+ "obj[39] = 47;"
+ "obj.foo;"); // This setter should not run, due to the interceptor.
+ Local<Script> interceptor_getter_script = v8_compile("obj[37];");
+ Local<Value> result = getter_script->Run();
+ CHECK(v8_num(5)->Equals(result));
+ result = setter_script->Run();
+ CHECK(v8_num(23)->Equals(result));
+ result = interceptor_setter_script->Run();
+ CHECK(v8_num(23)->Equals(result));
+ result = interceptor_getter_script->Run();
+ CHECK(v8_num(625)->Equals(result));
+}
+
+
+static void UnboxedDoubleIndexedPropertyGetter(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index < 25) {
+ info.GetReturnValue().Set(v8_num(index));
+ }
+}
+
+
+static void UnboxedDoubleIndexedPropertySetter(
+ uint32_t index, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index < 25) {
+ info.GetReturnValue().Set(v8_num(index));
+ }
+}
+
+
+void UnboxedDoubleIndexedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ // Force the list of returned keys to be stored in a FastDoubleArray.
+ Local<Script> indexed_property_names_script = v8_compile(
+ "keys = new Array(); keys[125000] = 1;"
+ "for(i = 0; i < 80000; i++) { keys[i] = i; };"
+ "keys.length = 25; keys;");
+ Local<Value> result = indexed_property_names_script->Run();
+ info.GetReturnValue().Set(Local<v8::Array>::Cast(result));
+}
+
+
+// Make sure that the the interceptor code in the runtime properly handles
+// merging property name lists for double-array-backed arrays.
+THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ UnboxedDoubleIndexedPropertyGetter, UnboxedDoubleIndexedPropertySetter, 0,
+ 0, UnboxedDoubleIndexedPropertyEnumerator));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ // When obj is created, force it to be Stored in a FastDoubleArray.
+ Local<Script> create_unboxed_double_script = v8_compile(
+ "obj[125000] = 1; for(i = 0; i < 80000; i+=2) { obj[i] = i; } "
+ "key_count = 0; "
+ "for (x in obj) {key_count++;};"
+ "obj;");
+ Local<Value> result = create_unboxed_double_script->Run();
+ CHECK(result->ToObject(isolate)->HasRealIndexedProperty(2000));
+ Local<Script> key_count_check = v8_compile("key_count;");
+ result = key_count_check->Run();
+ CHECK(v8_num(40013)->Equals(result));
+}
+
+
+void SloppyArgsIndexedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ // Force the list of returned keys to be stored in a Arguments object.
+ Local<Script> indexed_property_names_script = v8_compile(
+ "function f(w,x) {"
+ " return arguments;"
+ "}"
+ "keys = f(0, 1, 2, 3);"
+ "keys;");
+ Local<Object> result =
+ Local<Object>::Cast(indexed_property_names_script->Run());
+ // Have to populate the handle manually, as it's not Cast-able.
+ i::Handle<i::JSObject> o = v8::Utils::OpenHandle<Object, i::JSObject>(result);
+ i::Handle<i::JSArray> array(reinterpret_cast<i::JSArray*>(*o));
+ info.GetReturnValue().Set(v8::Utils::ToLocal(array));
+}
+
+
+static void SloppyIndexedPropertyGetter(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index < 4) {
+ info.GetReturnValue().Set(v8_num(index));
+ }
+}
+
+
+// Make sure that the the interceptor code in the runtime properly handles
+// merging property name lists for non-string arguments arrays.
+THREADED_TEST(IndexedInterceptorSloppyArgsWithIndexedAccessor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ SloppyIndexedPropertyGetter, 0, 0, 0,
+ SloppyArgsIndexedPropertyEnumerator));
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ Local<Script> create_args_script = v8_compile(
+ "var key_count = 0;"
+ "for (x in obj) {key_count++;} key_count;");
+ Local<Value> result = create_args_script->Run();
+ CHECK(v8_num(4)->Equals(result));
+}
+
+
+static void IdentityIndexedPropertyGetter(
+ uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(index);
+}
+
+
+THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+ // Check fast object case.
+ const char* fast_case_code =
+ "Object.getOwnPropertyDescriptor(obj, 0).value.toString()";
+ ExpectString(fast_case_code, "0");
+
+ // Check slow case.
+ const char* slow_case_code =
+ "obj.x = 1; delete obj.x;"
+ "Object.getOwnPropertyDescriptor(obj, 1).value.toString()";
+ ExpectString(slow_case_code, "1");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithNoSetter) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+ const char* code =
+ "try {"
+ " obj[0] = 239;"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = obj[0];"
+ " if (v != 0) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ obj->TurnOnAccessCheck();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var result = 'PASSED';"
+ "for (var i = 0; i < 100; i++) {"
+ " try {"
+ " var v = obj[0];"
+ " result = 'Wrong value ' + v + ' at iteration ' + i;"
+ " break;"
+ " } catch (e) {"
+ " /* pass */"
+ " }"
+ "}"
+ "result";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var result = 'PASSED';"
+ "for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " if (i == 5) {"
+ " %EnableAccessChecks(obj);"
+ " }"
+ " try {"
+ " var v = obj[i];"
+ " if (i == 5) {"
+ " result = 'Should not have reached this!';"
+ " break;"
+ " } else if (v != expected) {"
+ " result = 'Wrong value ' + v + ' at iteration ' + i;"
+ " break;"
+ " }"
+ " } catch (e) {"
+ " if (i != 5) {"
+ " result = e;"
+ " }"
+ " }"
+ " if (i == 5) %DisableAccessChecks(obj);"
+ "}"
+ "result";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = obj[i];"
+ " if (v != i) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " var key = i;"
+ " if (i == 25) {"
+ " key = -1;"
+ " expected = undefined;"
+ " }"
+ " if (i == 50) {"
+ " /* probe minimal Smi number on 32-bit platforms */"
+ " key = -(1 << 30);"
+ " expected = undefined;"
+ " }"
+ " if (i == 75) {"
+ " /* probe minimal Smi number on 64-bit platforms */"
+ " key = 1 << 31;"
+ " expected = undefined;"
+ " }"
+ " var v = obj[key];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " var key = i;"
+ " if (i == 50) {"
+ " key = 'foobar';"
+ " expected = undefined;"
+ " }"
+ " var v = obj[key];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var original = obj;"
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " if (i == 50) {"
+ " obj = {50: 'foobar'};"
+ " expected = 'foobar';"
+ " }"
+ " var v = obj[i];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " if (i == 50) obj = original;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var original = obj;"
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var expected = i;"
+ " if (i == 5) {"
+ " obj = 239;"
+ " expected = undefined;"
+ " }"
+ " var v = obj[i];"
+ " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " if (i == 5) obj = original;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+THREADED_TEST(IndexedInterceptorOnProto) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
+
+ LocalContext context;
+ Local<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+
+ const char* code =
+ "var o = {__proto__: obj};"
+ "try {"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = o[i];"
+ " if (v != i) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
+static void NoBlockGetterX(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>&) {}
+
+
+static void NoBlockGetterI(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Value>&) {}
+
+
+static void PDeleter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ if (!name->Equals(v8_str("foo"))) {
+ return; // not intercepted
+ }
+
+ info.GetReturnValue().Set(false); // intercepted, don't delete the property
+}
+
+
+static void IDeleter(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ if (index != 2) {
+ return; // not intercepted
+ }
+
+ info.GetReturnValue().Set(false); // intercepted, don't delete the property
+}
+
+
+THREADED_TEST(Deleter) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX, NULL,
+ NULL, PDeleter, NULL));
+ obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ NoBlockGetterI, NULL, NULL, IDeleter, NULL));
+ LocalContext context;
+ context->Global()->Set(v8_str("k"), obj->NewInstance());
+ CompileRun(
+ "k.foo = 'foo';"
+ "k.bar = 'bar';"
+ "k[2] = 2;"
+ "k[4] = 4;");
+ CHECK(v8_compile("delete k.foo")->Run()->IsFalse());
+ CHECK(v8_compile("delete k.bar")->Run()->IsTrue());
+
+ CHECK(v8_compile("k.foo")->Run()->Equals(v8_str("foo")));
+ CHECK(v8_compile("k.bar")->Run()->IsUndefined());
+
+ CHECK(v8_compile("delete k[2]")->Run()->IsFalse());
+ CHECK(v8_compile("delete k[4]")->Run()->IsTrue());
+
+ CHECK(v8_compile("k[2]")->Run()->Equals(v8_num(2)));
+ CHECK(v8_compile("k[4]")->Run()->IsUndefined());
+}
+
+
+static void GetK(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (name->Equals(v8_str("foo")) || name->Equals(v8_str("bar")) ||
+ name->Equals(v8_str("baz"))) {
+ info.GetReturnValue().SetUndefined();
+ }
+}
+
+
+static void IndexedGetK(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index == 0 || index == 1) info.GetReturnValue().SetUndefined();
+}
+
+
+static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
+ result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"));
+ info.GetReturnValue().Set(result);
+}
+
+
+static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
+ result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("0"));
+ result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("1"));
+ info.GetReturnValue().Set(result);
+}
+
+
+THREADED_TEST(Enumerators) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ obj->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(GetK, NULL, NULL, NULL, NamedEnum));
+ obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ IndexedGetK, NULL, NULL, NULL, IndexedEnum));
+ LocalContext context;
+ context->Global()->Set(v8_str("k"), obj->NewInstance());
+ v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
+ "k[10] = 0;"
+ "k.a = 0;"
+ "k[5] = 0;"
+ "k.b = 0;"
+ "k[4294967295] = 0;"
+ "k.c = 0;"
+ "k[4294967296] = 0;"
+ "k.d = 0;"
+ "k[140000] = 0;"
+ "k.e = 0;"
+ "k[30000000000] = 0;"
+ "k.f = 0;"
+ "var result = [];"
+ "for (var prop in k) {"
+ " result.push(prop);"
+ "}"
+ "result"));
+ // Check that we get all the property names returned including the
+ // ones from the enumerators in the right order: indexed properties
+ // in numerical order, indexed interceptor properties, named
+ // properties in insertion order, named interceptor properties.
+ // This order is not mandated by the spec, so this test is just
+ // documenting our behavior.
+ CHECK_EQ(17u, result->Length());
+ // Indexed properties in numerical order.
+ CHECK(v8_str("5")->Equals(result->Get(v8::Integer::New(isolate, 0))));
+ CHECK(v8_str("10")->Equals(result->Get(v8::Integer::New(isolate, 1))));
+ CHECK(v8_str("140000")->Equals(result->Get(v8::Integer::New(isolate, 2))));
+ CHECK(
+ v8_str("4294967295")->Equals(result->Get(v8::Integer::New(isolate, 3))));
+ // Indexed interceptor properties in the order they are returned
+ // from the enumerator interceptor.
+ CHECK(v8_str("0")->Equals(result->Get(v8::Integer::New(isolate, 4))));
+ CHECK(v8_str("1")->Equals(result->Get(v8::Integer::New(isolate, 5))));
+ // Named properties in insertion order.
+ CHECK(v8_str("a")->Equals(result->Get(v8::Integer::New(isolate, 6))));
+ CHECK(v8_str("b")->Equals(result->Get(v8::Integer::New(isolate, 7))));
+ CHECK(v8_str("c")->Equals(result->Get(v8::Integer::New(isolate, 8))));
+ CHECK(
+ v8_str("4294967296")->Equals(result->Get(v8::Integer::New(isolate, 9))));
+ CHECK(v8_str("d")->Equals(result->Get(v8::Integer::New(isolate, 10))));
+ CHECK(v8_str("e")->Equals(result->Get(v8::Integer::New(isolate, 11))));
+ CHECK(v8_str("30000000000")
+ ->Equals(result->Get(v8::Integer::New(isolate, 12))));
+ CHECK(v8_str("f")->Equals(result->Get(v8::Integer::New(isolate, 13))));
+ // Named interceptor properties.
+ CHECK(v8_str("foo")->Equals(result->Get(v8::Integer::New(isolate, 14))));
+ CHECK(v8_str("bar")->Equals(result->Get(v8::Integer::New(isolate, 15))));
+ CHECK(v8_str("baz")->Equals(result->Get(v8::Integer::New(isolate, 16))));
+}
+
+
+v8::Handle<Value> call_ic_function;
+v8::Handle<Value> call_ic_function2;
+v8::Handle<Value> call_ic_function3;
+
+static void InterceptorCallICGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(v8_str("x")->Equals(name));
+ info.GetReturnValue().Set(call_ic_function);
+}
+
+
+// This test should hit the call IC for the interceptor case.
+THREADED_TEST(InterceptorCallIC) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ call_ic_function = v8_compile("function f(x) { return x + 1; }; f")->Run();
+ v8::Handle<Value> value = CompileRun(
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.x(41);"
+ "}");
+ CHECK_EQ(42, value->Int32Value());
+}
+
+
+// This test checks that if interceptor doesn't provide
+// a value, we can fetch regular value.
+THREADED_TEST(InterceptorCallICSeesOthers) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "o.x = function f(x) { return x + 1; };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result = o.x(41);"
+ "}");
+ CHECK_EQ(42, value->Int32Value());
+}
+
+
+static v8::Handle<Value> call_ic_function4;
+static void InterceptorCallICGetter4(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(v8_str("x")->Equals(name));
+ info.GetReturnValue().Set(call_ic_function4);
+}
+
+
+// This test checks that if interceptor provides a function,
+// even if we cached shadowed variant, interceptor's function
+// is invoked
+THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter4));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ call_ic_function4 = v8_compile("function f(x) { return x - 1; }; f")->Run();
+ v8::Handle<Value> value = CompileRun(
+ "Object.getPrototypeOf(o).x = function(x) { return x + 1; };"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.x(42);"
+ "}");
+ CHECK_EQ(41, value->Int32Value());
+}
+
+
+// Test the case when we stored cacheable lookup into
+// a stub, but it got invalidated later on
+THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "proto1 = new Object();"
+ "proto2 = new Object();"
+ "o.__proto__ = proto1;"
+ "proto1.__proto__ = proto2;"
+ "proto2.y = function(x) { return x + 1; };"
+ // Invoke it many times to compile a stub
+ "for (var i = 0; i < 7; i++) {"
+ " o.y(42);"
+ "}"
+ "proto1.y = function(x) { return x - 1; };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result += o.y(42);"
+ "}");
+ CHECK_EQ(41 * 7, value->Int32Value());
+}
+
+
+// This test checks that if interceptor doesn't provide a function,
+// cached constant function is used
+THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "function inc(x) { return x + 1; };"
+ "inc(1);"
+ "o.x = inc;"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.x(42);"
+ "}");
+ CHECK_EQ(43, value->Int32Value());
+}
+
+
+static v8::Handle<Value> call_ic_function5;
+static void InterceptorCallICGetter5(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (v8_str("x")->Equals(name)) info.GetReturnValue().Set(call_ic_function5);
+}
+
+
+// This test checks that if interceptor provides a function,
+// even if we cached constant function, interceptor's function
+// is invoked
+THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter5));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ call_ic_function5 = v8_compile("function f(x) { return x - 1; }; f")->Run();
+ v8::Handle<Value> value = CompileRun(
+ "function inc(x) { return x + 1; };"
+ "inc(1);"
+ "o.x = inc;"
+ "var result = 0;"
+ "for (var i = 0; i < 1000; i++) {"
+ " result = o.x(42);"
+ "}");
+ CHECK_EQ(41, value->Int32Value());
+}
+
+
+static v8::Handle<Value> call_ic_function6;
+static void InterceptorCallICGetter6(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (v8_str("x")->Equals(name)) info.GetReturnValue().Set(call_ic_function6);
+}
+
+
+// Same test as above, except the code is wrapped in a function
+// to test the optimized compiler.
+THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter6));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ call_ic_function6 = v8_compile("function f(x) { return x - 1; }; f")->Run();
+ v8::Handle<Value> value = CompileRun(
+ "function inc(x) { return x + 1; };"
+ "inc(1);"
+ "o.x = inc;"
+ "function test() {"
+ " var result = 0;"
+ " for (var i = 0; i < 1000; i++) {"
+ " result = o.x(42);"
+ " }"
+ " return result;"
+ "};"
+ "test();"
+ "test();"
+ "test();"
+ "%OptimizeFunctionOnNextCall(test);"
+ "test()");
+ CHECK_EQ(41, value->Int32Value());
+}
+
+
+// Test the case when we stored constant function into
+// a stub, but it got invalidated later on
+THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "function inc(x) { return x + 1; };"
+ "inc(1);"
+ "proto1 = new Object();"
+ "proto2 = new Object();"
+ "o.__proto__ = proto1;"
+ "proto1.__proto__ = proto2;"
+ "proto2.y = inc;"
+ // Invoke it many times to compile a stub
+ "for (var i = 0; i < 7; i++) {"
+ " o.y(42);"
+ "}"
+ "proto1.y = function(x) { return x - 1; };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result += o.y(42);"
+ "}");
+ CHECK_EQ(41 * 7, value->Int32Value());
+}
+
+
+// Test the case when we stored constant function into
+// a stub, but it got invalidated later on due to override on
+// global object which is between interceptor and constant function' holders.
+THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "function inc(x) { return x + 1; };"
+ "inc(1);"
+ "o.__proto__ = this;"
+ "this.__proto__.y = inc;"
+ // Invoke it many times to compile a stub
+ "for (var i = 0; i < 7; i++) {"
+ " if (o.y(42) != 43) throw 'oops: ' + o.y(42);"
+ "}"
+ "this.y = function(x) { return x - 1; };"
+ "var result = 0;"
+ "for (var i = 0; i < 7; i++) {"
+ " result += o.y(42);"
+ "}");
+ CHECK_EQ(41 * 7, value->Int32Value());
+}
+
+
+// Test the case when actual function to call sits on global object.
+THREADED_TEST(InterceptorCallICCachedFromGlobal) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+
+ v8::Handle<Value> value = CompileRun(
+ "try {"
+ " o.__proto__ = this;"
+ " for (var i = 0; i < 10; i++) {"
+ " var v = o.parseFloat('239');"
+ " if (v != 239) throw v;"
+ // Now it should be ICed and keep a reference to parseFloat.
+ " }"
+ " var result = 0;"
+ " for (var i = 0; i < 10; i++) {"
+ " result += o.parseFloat('239');"
+ " }"
+ " result"
+ "} catch(e) {"
+ " e"
+ "};");
+ CHECK_EQ(239 * 10, value->Int32Value());
+}
+
+
+v8::Handle<Value> keyed_call_ic_function;
+
+static void InterceptorKeyedCallICGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (v8_str("x")->Equals(name)) {
+ info.GetReturnValue().Set(keyed_call_ic_function);
+ }
+}
+
+
+// Test the case when we stored cacheable lookup into
+// a stub, but the function name changed (to another cacheable function).
+THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ CompileRun(
+ "proto = new Object();"
+ "proto.y = function(x) { return x + 1; };"
+ "proto.z = function(x) { return x - 1; };"
+ "o.__proto__ = proto;"
+ "var result = 0;"
+ "var method = 'y';"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { method = 'z'; };"
+ " result += o[method](41);"
+ "}");
+ CHECK_EQ(42 * 5 + 40 * 5,
+ context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Test the case when we stored cacheable lookup into
+// a stub, but the function name changed (and the new function is present
+// both before and after the interceptor in the prototype chain).
+THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorKeyedCallICGetter));
+ LocalContext context;
+ context->Global()->Set(v8_str("proto1"), templ->NewInstance());
+ keyed_call_ic_function =
+ v8_compile("function f(x) { return x - 1; }; f")->Run();
+ CompileRun(
+ "o = new Object();"
+ "proto2 = new Object();"
+ "o.y = function(x) { return x + 1; };"
+ "proto2.y = function(x) { return x + 2; };"
+ "o.__proto__ = proto1;"
+ "proto1.__proto__ = proto2;"
+ "var result = 0;"
+ "var method = 'x';"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { method = 'y'; };"
+ " result += o[method](41);"
+ "}");
+ CHECK_EQ(42 * 5 + 40 * 5,
+ context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Same as InterceptorKeyedCallICKeyChange1 only the cacheable function sit
+// on the global object.
+THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ CompileRun(
+ "function inc(x) { return x + 1; };"
+ "inc(1);"
+ "function dec(x) { return x - 1; };"
+ "dec(1);"
+ "o.__proto__ = this;"
+ "this.__proto__.x = inc;"
+ "this.__proto__.y = dec;"
+ "var result = 0;"
+ "var method = 'x';"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { method = 'y'; };"
+ " result += o[method](41);"
+ "}");
+ CHECK_EQ(42 * 5 + 40 * 5,
+ context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Test the case when actual function to call sits on global object.
+THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+
+ CompileRun(
+ "function len(x) { return x.length; };"
+ "o.__proto__ = this;"
+ "var m = 'parseFloat';"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) {"
+ " m = 'len';"
+ " saved_result = result;"
+ " };"
+ " result = o[m]('239');"
+ "}");
+ CHECK_EQ(3, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+}
+
+
+// Test the map transition before the interceptor.
+THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
+
+ CompileRun(
+ "var o = new Object();"
+ "o.__proto__ = proto;"
+ "o.method = function(x) { return x + 1; };"
+ "var m = 'method';"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { o.method = function(x) { return x - 1; }; };"
+ " result += o[m](41);"
+ "}");
+ CHECK_EQ(42 * 5 + 40 * 5,
+ context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+// Test the map transition after the interceptor.
+THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+
+ CompileRun(
+ "var proto = new Object();"
+ "o.__proto__ = proto;"
+ "proto.method = function(x) { return x + 1; };"
+ "var m = 'method';"
+ "var result = 0;"
+ "for (var i = 0; i < 10; i++) {"
+ " if (i == 5) { proto.method = function(x) { return x - 1; }; };"
+ " result += o[m](41);"
+ "}");
+ CHECK_EQ(42 * 5 + 40 * 5,
+ context->Global()->Get(v8_str("result"))->Int32Value());
+}
+
+
+static int interceptor_call_count = 0;
+
+static void InterceptorICRefErrorGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (v8_str("x")->Equals(name) && interceptor_call_count++ < 20) {
+ info.GetReturnValue().Set(call_ic_function2);
+ }
+}
+
+
+// This test should hit load and call ICs for the interceptor case.
+// Once in a while, the interceptor will reply that a property was not
+// found in which case we should get a reference error.
+THREADED_TEST(InterceptorICReferenceErrors) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorICRefErrorGetter));
+ LocalContext context(0, templ, v8::Handle<Value>());
+ call_ic_function2 = v8_compile("function h(x) { return x; }; h")->Run();
+ v8::Handle<Value> value = CompileRun(
+ "function f() {"
+ " for (var i = 0; i < 1000; i++) {"
+ " try { x; } catch(e) { return true; }"
+ " }"
+ " return false;"
+ "};"
+ "f();");
+ CHECK_EQ(true, value->BooleanValue());
+ interceptor_call_count = 0;
+ value = CompileRun(
+ "function g() {"
+ " for (var i = 0; i < 1000; i++) {"
+ " try { x(42); } catch(e) { return true; }"
+ " }"
+ " return false;"
+ "};"
+ "g();");
+ CHECK_EQ(true, value->BooleanValue());
+}
+
+
+static int interceptor_ic_exception_get_count = 0;
+
+static void InterceptorICExceptionGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (v8_str("x")->Equals(name) && ++interceptor_ic_exception_get_count < 20) {
+ info.GetReturnValue().Set(call_ic_function3);
+ }
+ if (interceptor_ic_exception_get_count == 20) {
+ info.GetIsolate()->ThrowException(v8_num(42));
+ return;
+ }
+}
+
+
+// Test interceptor load/call IC where the interceptor throws an
+// exception once in a while.
+THREADED_TEST(InterceptorICGetterExceptions) {
+ interceptor_ic_exception_get_count = 0;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorICExceptionGetter));
+ LocalContext context(0, templ, v8::Handle<Value>());
+ call_ic_function3 = v8_compile("function h(x) { return x; }; h")->Run();
+ v8::Handle<Value> value = CompileRun(
+ "function f() {"
+ " for (var i = 0; i < 100; i++) {"
+ " try { x; } catch(e) { return true; }"
+ " }"
+ " return false;"
+ "};"
+ "f();");
+ CHECK_EQ(true, value->BooleanValue());
+ interceptor_ic_exception_get_count = 0;
+ value = CompileRun(
+ "function f() {"
+ " for (var i = 0; i < 100; i++) {"
+ " try { x(42); } catch(e) { return true; }"
+ " }"
+ " return false;"
+ "};"
+ "f();");
+ CHECK_EQ(true, value->BooleanValue());
+}
+
+
+static int interceptor_ic_exception_set_count = 0;
+
+static void InterceptorICExceptionSetter(
+ Local<Name> key, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ if (++interceptor_ic_exception_set_count > 20) {
+ info.GetIsolate()->ThrowException(v8_num(42));
+ }
+}
+
+
+// Test interceptor store IC where the interceptor throws an exception
+// once in a while.
+THREADED_TEST(InterceptorICSetterExceptions) {
+ interceptor_ic_exception_set_count = 0;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(0, InterceptorICExceptionSetter));
+ LocalContext context(0, templ, v8::Handle<Value>());
+ v8::Handle<Value> value = CompileRun(
+ "function f() {"
+ " for (var i = 0; i < 100; i++) {"
+ " try { x = 42; } catch(e) { return true; }"
+ " }"
+ " return false;"
+ "};"
+ "f();");
+ CHECK_EQ(true, value->BooleanValue());
+}
+
+
+// Test that we ignore null interceptors.
+THREADED_TEST(NullNamedInterceptor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ static_cast<v8::GenericNamedPropertyGetterCallback>(0)));
+ LocalContext context;
+ templ->Set(CcTest::isolate(), "x", v8_num(42));
+ v8::Handle<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+ v8::Handle<Value> value = CompileRun("obj.x");
+ CHECK(value->IsInt32());
+ CHECK_EQ(42, value->Int32Value());
+}
+
+
+// Test that we ignore null interceptors.
+THREADED_TEST(NullIndexedInterceptor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ static_cast<v8::IndexedPropertyGetterCallback>(0)));
+ LocalContext context;
+ templ->Set(CcTest::isolate(), "42", v8_num(42));
+ v8::Handle<v8::Object> obj = templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), obj);
+ v8::Handle<Value> value = CompileRun("obj[42]");
+ CHECK(value->IsInt32());
+ CHECK_EQ(42, value->Int32Value());
+}
+
+
+THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
+ LocalContext env;
+ env->Global()->Set(v8_str("obj"), templ->GetFunction()->NewInstance());
+ ExpectTrue("obj.x === 42");
+ ExpectTrue("!obj.propertyIsEnumerable('x')");
+}
+
+
+THREADED_TEST(Regress256330) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ context->Global()->Set(v8_str("Bug"), templ->GetFunction());
+ CompileRun(
+ "\"use strict\"; var o = new Bug;"
+ "function f(o) { o.x = 10; };"
+ "f(o); f(o); f(o);"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f(o);");
+ ExpectBoolean("%GetOptimizationStatus(f) != 2", true);
+}
+
+
+THREADED_TEST(CrankshaftInterceptorSetter) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ // Initialize fields to avoid transitions later.
+ "obj.age = 0;"
+ "obj.accessor_age = 42;"
+ "function setter(i) { this.accessor_age = i; };"
+ "function getter() { return this.accessor_age; };"
+ "function setAge(i) { obj.age = i; };"
+ "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
+ "setAge(1);"
+ "setAge(2);"
+ "setAge(3);"
+ "%OptimizeFunctionOnNextCall(setAge);"
+ "setAge(4);");
+ // All stores went through the interceptor.
+ ExpectInt32("obj.interceptor_age", 4);
+ ExpectInt32("obj.accessor_age", 42);
+}
+
+
+THREADED_TEST(CrankshaftInterceptorGetter) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ // Initialize fields to avoid transitions later.
+ "obj.age = 1;"
+ "obj.accessor_age = 42;"
+ "function getter() { return this.accessor_age; };"
+ "function getAge() { return obj.interceptor_age; };"
+ "Object.defineProperty(obj, 'interceptor_age', { get:getter });"
+ "getAge();"
+ "getAge();"
+ "getAge();"
+ "%OptimizeFunctionOnNextCall(getAge);");
+ // Access through interceptor.
+ ExpectInt32("getAge()", 1);
+}
+
+
+THREADED_TEST(CrankshaftInterceptorFieldRead) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ "obj.__proto__.interceptor_age = 42;"
+ "obj.age = 100;"
+ "function getAge() { return obj.interceptor_age; };");
+ ExpectInt32("getAge();", 100);
+ ExpectInt32("getAge();", 100);
+ ExpectInt32("getAge();", 100);
+ CompileRun("%OptimizeFunctionOnNextCall(getAge);");
+ // Access through interceptor.
+ ExpectInt32("getAge();", 100);
+}
+
+
+THREADED_TEST(CrankshaftInterceptorFieldWrite) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ CompileRun(
+ "var obj = new Obj;"
+ "obj.age = 100000;"
+ "function setAge(i) { obj.age = i };"
+ "setAge(100);"
+ "setAge(101);"
+ "setAge(102);"
+ "%OptimizeFunctionOnNextCall(setAge);"
+ "setAge(103);");
+ ExpectInt32("obj.age", 100000);
+ ExpectInt32("obj.interceptor_age", 103);
+}
+
+
+THREADED_TEST(Regress149912) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
+ AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ context->Global()->Set(v8_str("Bug"), templ->GetFunction());
+ CompileRun("Number.prototype.__proto__ = new Bug; var x = 0; x.foo();");
+}
+
+
+THREADED_TEST(Regress125988) {
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FunctionTemplate> intercept = FunctionTemplate::New(CcTest::isolate());
+ AddInterceptor(intercept, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ LocalContext env;
+ env->Global()->Set(v8_str("Intercept"), intercept->GetFunction());
+ CompileRun(
+ "var a = new Object();"
+ "var b = new Intercept();"
+ "var c = new Object();"
+ "c.__proto__ = b;"
+ "b.__proto__ = a;"
+ "a.x = 23;"
+ "for (var i = 0; i < 3; i++) c.x;");
+ ExpectBoolean("c.hasOwnProperty('x')", false);
+ ExpectInt32("c.x", 23);
+ CompileRun(
+ "a.y = 42;"
+ "for (var i = 0; i < 3; i++) c.x;");
+ ExpectBoolean("c.hasOwnProperty('x')", false);
+ ExpectInt32("c.x", 23);
+ ExpectBoolean("c.hasOwnProperty('y')", false);
+ ExpectInt32("c.y", 42);
+}
+
+
+static void IndexedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 1);
+ result->Set(0, v8::Integer::New(info.GetIsolate(), 7));
+ info.GetReturnValue().Set(result);
+}
+
+
+static void NamedPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
+ result->Set(0, v8_str("x"));
+ result->Set(1, v8::Symbol::GetIterator(info.GetIsolate()));
+ info.GetReturnValue().Set(result);
+}
+
+
+THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj_template =
+ v8::ObjectTemplate::New(isolate);
+
+ obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
+ obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
+ obj_template->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+ NULL, NULL, NULL, NULL, IndexedPropertyEnumerator));
+ obj_template->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ NULL, NULL, NULL, NULL, NamedPropertyEnumerator));
+
+ LocalContext context;
+ v8::Handle<v8::Object> global = context->Global();
+ global->Set(v8_str("object"), obj_template->NewInstance());
+
+ v8::Handle<v8::Value> result =
+ CompileRun("Object.getOwnPropertyNames(object)");
+ CHECK(result->IsArray());
+ v8::Handle<v8::Array> result_array = v8::Handle<v8::Array>::Cast(result);
+ CHECK_EQ(2u, result_array->Length());
+ CHECK(result_array->Get(0)->IsString());
+ CHECK(result_array->Get(1)->IsString());
+ CHECK(v8_str("7")->Equals(result_array->Get(0)));
+ CHECK(v8_str("x")->Equals(result_array->Get(1)));
+
+ result = CompileRun("var ret = []; for (var k in object) ret.push(k); ret");
+ CHECK(result->IsArray());
+ result_array = v8::Handle<v8::Array>::Cast(result);
+ CHECK_EQ(2u, result_array->Length());
+ CHECK(result_array->Get(0)->IsString());
+ CHECK(result_array->Get(1)->IsString());
+ CHECK(v8_str("7")->Equals(result_array->Get(0)));
+ CHECK(v8_str("x")->Equals(result_array->Get(1)));
+
+ result = CompileRun("Object.getOwnPropertySymbols(object)");
+ CHECK(result->IsArray());
+ result_array = v8::Handle<v8::Array>::Cast(result);
+ CHECK_EQ(1u, result_array->Length());
+ CHECK(result_array->Get(0)->Equals(v8::Symbol::GetIterator(isolate)));
+}
+
+
+namespace {
+
+template <typename T>
+Local<Object> BuildWrappedObject(v8::Isolate* isolate, T* data) {
+ auto templ = v8::ObjectTemplate::New(isolate);
+ templ->SetInternalFieldCount(1);
+ auto instance = templ->NewInstance();
+ instance->SetAlignedPointerInInternalField(0, data);
+ return instance;
+}
+
+
+template <typename T>
+T* GetWrappedObject(Local<Value> data) {
+ return reinterpret_cast<T*>(
+ Object::Cast(*data)->GetAlignedPointerFromInternalField(0));
+}
+
+
+struct AccessCheckData {
+ int count;
+ bool result;
+};
+
+
+bool SimpleNamedAccessChecker(Local<v8::Object> global, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
+ auto access_check_data = GetWrappedObject<AccessCheckData>(data);
+ access_check_data->count++;
+ return access_check_data->result;
+}
+
+
+bool SimpleIndexedAccessChecker(Local<v8::Object> global, uint32_t index,
+ v8::AccessType type, Local<Value> data) {
+ auto access_check_data = GetWrappedObject<AccessCheckData>(data);
+ access_check_data->count++;
+ return access_check_data->result;
+}
+
+
+struct ShouldInterceptData {
+ int value;
+ bool should_intercept;
+};
+
+
+void ShouldNamedInterceptor(Local<Name> name,
+ const v8::PropertyCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info, FUNCTION_ADDR(ShouldNamedInterceptor));
+ auto data = GetWrappedObject<ShouldInterceptData>(info.Data());
+ if (!data->should_intercept) return;
+ info.GetReturnValue().Set(v8_num(data->value));
+}
+
+
+void ShouldIndexedInterceptor(uint32_t,
+ const v8::PropertyCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ CheckReturnValue(info, FUNCTION_ADDR(ShouldIndexedInterceptor));
+ auto data = GetWrappedObject<ShouldInterceptData>(info.Data());
+ if (!data->should_intercept) return;
+ info.GetReturnValue().Set(v8_num(data->value));
+}
+
+} // namespace
+
+
+THREADED_TEST(NamedAllCanReadInterceptor) {
+ auto isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext context;
+
+ AccessCheckData access_check_data;
+ access_check_data.result = false;
+ access_check_data.count = 0;
+
+ ShouldInterceptData intercept_data_0;
+ intercept_data_0.value = 239;
+ intercept_data_0.should_intercept = true;
+
+ ShouldInterceptData intercept_data_1;
+ intercept_data_1.value = 165;
+ intercept_data_1.should_intercept = false;
+
+ auto intercepted_0 = v8::ObjectTemplate::New(isolate);
+ {
+ v8::NamedPropertyHandlerConfiguration conf(ShouldNamedInterceptor);
+ conf.flags = v8::PropertyHandlerFlags::kAllCanRead;
+ conf.data =
+ BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data_0);
+ intercepted_0->SetHandler(conf);
+ }
+
+ auto intercepted_1 = v8::ObjectTemplate::New(isolate);
+ {
+ v8::NamedPropertyHandlerConfiguration conf(ShouldNamedInterceptor);
+ conf.flags = v8::PropertyHandlerFlags::kAllCanRead;
+ conf.data =
+ BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data_1);
+ intercepted_1->SetHandler(conf);
+ }
+
+ auto checked = v8::ObjectTemplate::New(isolate);
+ checked->SetAccessCheckCallbacks(
+ SimpleNamedAccessChecker, nullptr,
+ BuildWrappedObject<AccessCheckData>(isolate, &access_check_data), false);
+
+ context->Global()->Set(v8_str("intercepted_0"), intercepted_0->NewInstance());
+ context->Global()->Set(v8_str("intercepted_1"), intercepted_1->NewInstance());
+ auto checked_instance = checked->NewInstance();
+ checked_instance->Set(v8_str("whatever"), v8_num(17));
+ context->Global()->Set(v8_str("checked"), checked_instance);
+ CompileRun(
+ "checked.__proto__ = intercepted_1;"
+ "intercepted_1.__proto__ = intercepted_0;");
+
+ checked_instance->TurnOnAccessCheck();
+ CHECK_EQ(0, access_check_data.count);
+
+ access_check_data.result = true;
+ ExpectInt32("checked.whatever", 17);
+ CHECK_EQ(1, access_check_data.count);
+
+ access_check_data.result = false;
+ ExpectInt32("checked.whatever", intercept_data_0.value);
+ CHECK_EQ(2, access_check_data.count);
+
+ intercept_data_1.should_intercept = true;
+ ExpectInt32("checked.whatever", intercept_data_1.value);
+ CHECK_EQ(3, access_check_data.count);
+}
+
+
+THREADED_TEST(IndexedAllCanReadInterceptor) {
+ auto isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext context;
+
+ AccessCheckData access_check_data;
+ access_check_data.result = false;
+ access_check_data.count = 0;
+
+ ShouldInterceptData intercept_data_0;
+ intercept_data_0.value = 239;
+ intercept_data_0.should_intercept = true;
+
+ ShouldInterceptData intercept_data_1;
+ intercept_data_1.value = 165;
+ intercept_data_1.should_intercept = false;
+
+ auto intercepted_0 = v8::ObjectTemplate::New(isolate);
+ {
+ v8::IndexedPropertyHandlerConfiguration conf(ShouldIndexedInterceptor);
+ conf.flags = v8::PropertyHandlerFlags::kAllCanRead;
+ conf.data =
+ BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data_0);
+ intercepted_0->SetHandler(conf);
+ }
+
+ auto intercepted_1 = v8::ObjectTemplate::New(isolate);
+ {
+ v8::IndexedPropertyHandlerConfiguration conf(ShouldIndexedInterceptor);
+ conf.flags = v8::PropertyHandlerFlags::kAllCanRead;
+ conf.data =
+ BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data_1);
+ intercepted_1->SetHandler(conf);
+ }
+
+ auto checked = v8::ObjectTemplate::New(isolate);
+ checked->SetAccessCheckCallbacks(
+ nullptr, SimpleIndexedAccessChecker,
+ BuildWrappedObject<AccessCheckData>(isolate, &access_check_data), false);
+
+ context->Global()->Set(v8_str("intercepted_0"), intercepted_0->NewInstance());
+ context->Global()->Set(v8_str("intercepted_1"), intercepted_1->NewInstance());
+ auto checked_instance = checked->NewInstance();
+ context->Global()->Set(v8_str("checked"), checked_instance);
+ checked_instance->Set(15, v8_num(17));
+ CompileRun(
+ "checked.__proto__ = intercepted_1;"
+ "intercepted_1.__proto__ = intercepted_0;");
+
+ checked_instance->TurnOnAccessCheck();
+ CHECK_EQ(0, access_check_data.count);
+
+ access_check_data.result = true;
+ ExpectInt32("checked[15]", 17);
+ CHECK_EQ(1, access_check_data.count);
+
+ access_check_data.result = false;
+ ExpectInt32("checked[15]", intercept_data_0.value);
+ CHECK_EQ(2, access_check_data.count);
+
+ intercept_data_1.should_intercept = true;
+ ExpectInt32("checked[15]", intercept_data_1.value);
+ CHECK_EQ(3, access_check_data.count);
+}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 17de186b9c..096e52b051 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -30,7 +30,7 @@
#include <map>
#include <string>
-#include "src/v8.h"
+#include "test/cctest/test-api.h"
#if V8_OS_POSIX
#include <unistd.h> // NOLINT
@@ -41,16 +41,14 @@
#include "src/arguments.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
-#include "src/cpu-profiler.h"
#include "src/execution.h"
-#include "src/isolate.h"
#include "src/objects.h"
#include "src/parser.h"
+#include "src/smart-pointers.h"
#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
-#include "test/cctest/cctest.h"
static const bool kLogThreading = false;
@@ -107,8 +105,8 @@ static void IncrementingSignatureCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
signature_callback_count++;
- CHECK_EQ(signature_expected_receiver, args.Holder());
- CHECK_EQ(signature_expected_receiver, args.This());
+ CHECK(signature_expected_receiver->Equals(args.Holder()));
+ CHECK(signature_expected_receiver->Equals(args.This()));
v8::Handle<v8::Array> result =
v8::Array::New(args.GetIsolate(), args.Length());
for (int i = 0; i < args.Length(); i++)
@@ -117,15 +115,8 @@ static void IncrementingSignatureCallback(
}
-static void SignatureCallback(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result =
- v8::Array::New(args.GetIsolate(), args.Length());
- for (int i = 0; i < args.Length(); i++) {
- result->Set(v8::Integer::New(args.GetIsolate(), i), args[i]);
- }
- args.GetReturnValue().Set(result);
+static void Returns42(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(42);
}
@@ -202,8 +193,8 @@ static void TestSignature(const char* loop_js, Local<Value> receiver,
if (!expected_to_throw) {
CHECK_EQ(10, signature_callback_count);
} else {
- CHECK_EQ(v8_str("TypeError: Illegal invocation"),
- try_catch.Exception()->ToString(isolate));
+ CHECK(v8_str("TypeError: Illegal invocation")
+ ->Equals(try_catch.Exception()->ToString(isolate)));
}
}
@@ -283,75 +274,6 @@ THREADED_TEST(ReceiverSignature) {
}
-THREADED_TEST(ArgumentSignature) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> cons = v8::FunctionTemplate::New(isolate);
- cons->SetClassName(v8_str("Cons"));
- v8::Handle<v8::Signature> sig = v8::Signature::New(
- isolate, v8::Handle<v8::FunctionTemplate>(), 1, &cons);
- v8::Handle<v8::FunctionTemplate> fun =
- v8::FunctionTemplate::New(isolate,
- SignatureCallback,
- v8::Handle<Value>(),
- sig);
- env->Global()->Set(v8_str("Cons"), cons->GetFunction());
- env->Global()->Set(v8_str("Fun1"), fun->GetFunction());
-
- v8::Handle<Value> value1 = CompileRun("Fun1(4) == '';");
- CHECK(value1->IsTrue());
-
- v8::Handle<Value> value2 = CompileRun("Fun1(new Cons()) == '[object Cons]';");
- CHECK(value2->IsTrue());
-
- v8::Handle<Value> value3 = CompileRun("Fun1() == '';");
- CHECK(value3->IsTrue());
-
- v8::Handle<v8::FunctionTemplate> cons1 = v8::FunctionTemplate::New(isolate);
- cons1->SetClassName(v8_str("Cons1"));
- v8::Handle<v8::FunctionTemplate> cons2 = v8::FunctionTemplate::New(isolate);
- cons2->SetClassName(v8_str("Cons2"));
- v8::Handle<v8::FunctionTemplate> cons3 = v8::FunctionTemplate::New(isolate);
- cons3->SetClassName(v8_str("Cons3"));
-
- v8::Handle<v8::FunctionTemplate> args[3] = { cons1, cons2, cons3 };
- v8::Handle<v8::Signature> wsig = v8::Signature::New(
- isolate, v8::Handle<v8::FunctionTemplate>(), 3, args);
- v8::Handle<v8::FunctionTemplate> fun2 =
- v8::FunctionTemplate::New(isolate,
- SignatureCallback,
- v8::Handle<Value>(),
- wsig);
-
- env->Global()->Set(v8_str("Cons1"), cons1->GetFunction());
- env->Global()->Set(v8_str("Cons2"), cons2->GetFunction());
- env->Global()->Set(v8_str("Cons3"), cons3->GetFunction());
- env->Global()->Set(v8_str("Fun2"), fun2->GetFunction());
- v8::Handle<Value> value4 = CompileRun(
- "Fun2(new Cons1(), new Cons2(), new Cons3()) =="
- "'[object Cons1],[object Cons2],[object Cons3]'");
- CHECK(value4->IsTrue());
-
- v8::Handle<Value> value5 = CompileRun(
- "Fun2(new Cons1(), new Cons2(), 5) == '[object Cons1],[object Cons2],'");
- CHECK(value5->IsTrue());
-
- v8::Handle<Value> value6 = CompileRun(
- "Fun2(new Cons3(), new Cons2(), new Cons1()) == ',[object Cons2],'");
- CHECK(value6->IsTrue());
-
- v8::Handle<Value> value7 = CompileRun(
- "Fun2(new Cons1(), new Cons2(), new Cons3(), 'd') == "
- "'[object Cons1],[object Cons2],[object Cons3],d';");
- CHECK(value7->IsTrue());
-
- v8::Handle<Value> value8 = CompileRun(
- "Fun2(new Cons1(), new Cons2()) == '[object Cons1],[object Cons2]'");
- CHECK(value8->IsTrue());
-}
-
-
THREADED_TEST(HulIgennem) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -377,7 +299,7 @@ THREADED_TEST(Access) {
Local<Value> foo_after = obj->Get(v8_str("foo"));
CHECK(!foo_after->IsUndefined());
CHECK(foo_after->IsString());
- CHECK_EQ(bar_str, foo_after);
+ CHECK(bar_str->Equals(foo_after));
}
@@ -392,11 +314,11 @@ THREADED_TEST(AccessElement) {
Local<Value> after = obj->Get(1);
CHECK(!after->IsUndefined());
CHECK(after->IsString());
- CHECK_EQ(bar_str, after);
+ CHECK(bar_str->Equals(after));
Local<v8::Array> value = CompileRun("[\"a\", \"b\"]").As<v8::Array>();
- CHECK_EQ(v8_str("a"), value->Get(0));
- CHECK_EQ(v8_str("b"), value->Get(1));
+ CHECK(v8_str("a")->Equals(value->Get(0)));
+ CHECK(v8_str("b")->Equals(value->Get(1)));
}
@@ -540,7 +462,7 @@ THREADED_TEST(ScriptMakingExternalString) {
CHECK_EQ(source->IsExternal(), false);
CHECK_EQ(source->IsExternalOneByte(), false);
String::Encoding encoding = String::UNKNOWN_ENCODING;
- CHECK_EQ(NULL, source->GetExternalStringResourceBase(&encoding));
+ CHECK(!source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
bool success = source->MakeExternal(new TestResource(two_byte_source,
&dispose_count));
@@ -778,7 +700,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
CHECK(str.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ("RangeError: Invalid string length", *exception_value);
+ CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
}
{
@@ -790,7 +712,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
CHECK(str.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ("RangeError: Invalid string length", *exception_value);
+ CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
}
}
@@ -968,32 +890,6 @@ THREADED_TEST(GlobalProperties) {
}
-template<typename T>
-static void CheckReturnValue(const T& t, i::Address callback) {
- v8::ReturnValue<v8::Value> rv = t.GetReturnValue();
- i::Object** o = *reinterpret_cast<i::Object***>(&rv);
- CHECK_EQ(CcTest::isolate(), t.GetIsolate());
- CHECK_EQ(t.GetIsolate(), rv.GetIsolate());
- CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
- // Verify reset
- bool is_runtime = (*o)->IsTheHole();
- rv.Set(true);
- CHECK(!(*o)->IsTheHole() && !(*o)->IsUndefined());
- rv.Set(v8::Handle<v8::Object>());
- CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
- CHECK_EQ(is_runtime, (*o)->IsTheHole());
-
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(t.GetIsolate());
- // If CPU profiler is active check that when API callback is invoked
- // VMState is set to EXTERNAL.
- if (isolate->cpu_profiler()->is_profiling()) {
- CHECK_EQ(v8::EXTERNAL, isolate->current_vm_state());
- CHECK(isolate->external_callback_scope());
- CHECK_EQ(callback, isolate->external_callback_scope()->callback());
- }
-}
-
-
static void handle_callback_impl(const v8::FunctionCallbackInfo<Value>& info,
i::Address callback) {
ApiTestFuzzer::Fuzz();
@@ -1082,7 +978,7 @@ static void TestFunctionTemplateAccessor(Constructor constructor,
Local<Function> fun = fun_templ->GetFunction();
env->Global()->Set(v8_str("obj"), fun);
Local<Value> result = v8_compile("(new obj()).toString()")->Run();
- CHECK_EQ(v8_str("[object funky]"), result);
+ CHECK(v8_str("[object funky]")->Equals(result));
CompileRun("var obj_instance = new obj();");
Local<Script> script;
script = v8_compile("obj_instance.x");
@@ -1434,16 +1330,16 @@ THREADED_TEST(FindInstanceInPrototypeChain) {
other_instance->Set(v8_str("__proto__"), derived_instance2);
// base_instance is only an instance of base.
- CHECK_EQ(base_instance,
- base_instance->FindInstanceInPrototypeChain(base));
+ CHECK(
+ base_instance->Equals(base_instance->FindInstanceInPrototypeChain(base)));
CHECK(base_instance->FindInstanceInPrototypeChain(derived).IsEmpty());
CHECK(base_instance->FindInstanceInPrototypeChain(other).IsEmpty());
// derived_instance is an instance of base and derived.
- CHECK_EQ(derived_instance,
- derived_instance->FindInstanceInPrototypeChain(base));
- CHECK_EQ(derived_instance,
- derived_instance->FindInstanceInPrototypeChain(derived));
+ CHECK(derived_instance->Equals(
+ derived_instance->FindInstanceInPrototypeChain(base)));
+ CHECK(derived_instance->Equals(
+ derived_instance->FindInstanceInPrototypeChain(derived)));
CHECK(derived_instance->FindInstanceInPrototypeChain(other).IsEmpty());
// other_instance is an instance of other and its immediate
@@ -1451,12 +1347,12 @@ THREADED_TEST(FindInstanceInPrototypeChain) {
// Note, derived_instance is an instance of base and derived too,
// but it comes after derived_instance2 in the prototype chain of
// other_instance.
- CHECK_EQ(derived_instance2,
- other_instance->FindInstanceInPrototypeChain(base));
- CHECK_EQ(derived_instance2,
- other_instance->FindInstanceInPrototypeChain(derived));
- CHECK_EQ(other_instance,
- other_instance->FindInstanceInPrototypeChain(other));
+ CHECK(derived_instance2->Equals(
+ other_instance->FindInstanceInPrototypeChain(base)));
+ CHECK(derived_instance2->Equals(
+ other_instance->FindInstanceInPrototypeChain(derived)));
+ CHECK(other_instance->Equals(
+ other_instance->FindInstanceInPrototypeChain(other)));
}
@@ -1983,18 +1879,6 @@ THREADED_TEST(DescriptorInheritance) {
}
-int echo_named_call_count;
-
-
-static void EchoNamedProperty(Local<Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- CHECK_EQ(v8_str("data"), info.Data());
- echo_named_call_count++;
- info.GetReturnValue().Set(name);
-}
-
-
// Helper functions for Interceptor/Accessor interaction tests
void SimpleAccessorGetter(Local<String> name,
@@ -2042,213 +1926,6 @@ static void ThrowingSymbolAccessorGetter(
}
-void EmptyInterceptorGetter(Local<Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
-
-void EmptyInterceptorSetter(Local<Name> name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
-
-void EmptyGenericInterceptorGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
-
-void EmptyGenericInterceptorSetter(
- Local<Name> name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {}
-
-
-void StringInterceptorGetter(
- Local<String> name,
- const v8::PropertyCallbackInfo<v8::Value>&
- info) { // Intercept names that start with 'interceptor_'.
- String::Utf8Value utf8(name);
- char* name_str = *utf8;
- char prefix[] = "interceptor_";
- int i;
- for (i = 0; name_str[i] && prefix[i]; ++i) {
- if (name_str[i] != prefix[i]) return;
- }
- Handle<Object> self = Handle<Object>::Cast(info.This());
- info.GetReturnValue().Set(self->GetHiddenValue(v8_str(name_str + i)));
-}
-
-
-void StringInterceptorSetter(Local<String> name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- // Intercept accesses that set certain integer values, for which the name does
- // not start with 'accessor_'.
- String::Utf8Value utf8(name);
- char* name_str = *utf8;
- char prefix[] = "accessor_";
- int i;
- for (i = 0; name_str[i] && prefix[i]; ++i) {
- if (name_str[i] != prefix[i]) break;
- }
- if (!prefix[i]) return;
-
- if (value->IsInt32() && value->Int32Value() < 10000) {
- Handle<Object> self = Handle<Object>::Cast(info.This());
- self->SetHiddenValue(name, value);
- info.GetReturnValue().Set(value);
- }
-}
-
-void InterceptorGetter(Local<Name> generic_name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- if (generic_name->IsSymbol()) return;
- StringInterceptorGetter(Local<String>::Cast(generic_name), info);
-}
-
-void InterceptorSetter(Local<Name> generic_name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- if (generic_name->IsSymbol()) return;
- StringInterceptorSetter(Local<String>::Cast(generic_name), value, info);
-}
-
-void GenericInterceptorGetter(Local<Name> generic_name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- Local<String> str;
- if (generic_name->IsSymbol()) {
- Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
- if (name->IsUndefined()) return;
- str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
- } else {
- Local<String> name = Local<String>::Cast(generic_name);
- String::Utf8Value utf8(name);
- char* name_str = *utf8;
- if (*name_str == '_') return;
- str = String::Concat(v8_str("_str_"), name);
- }
-
- Handle<Object> self = Handle<Object>::Cast(info.This());
- info.GetReturnValue().Set(self->Get(str));
-}
-
-void GenericInterceptorSetter(Local<Name> generic_name, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- Local<String> str;
- if (generic_name->IsSymbol()) {
- Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
- if (name->IsUndefined()) return;
- str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
- } else {
- Local<String> name = Local<String>::Cast(generic_name);
- String::Utf8Value utf8(name);
- char* name_str = *utf8;
- if (*name_str == '_') return;
- str = String::Concat(v8_str("_str_"), name);
- }
-
- Handle<Object> self = Handle<Object>::Cast(info.This());
- self->Set(str, value);
- info.GetReturnValue().Set(value);
-}
-
-void AddAccessor(Handle<FunctionTemplate> templ,
- Handle<String> name,
- v8::AccessorGetterCallback getter,
- v8::AccessorSetterCallback setter) {
- templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
-}
-
-void AddInterceptor(Handle<FunctionTemplate> templ,
- v8::NamedPropertyGetterCallback getter,
- v8::NamedPropertySetterCallback setter) {
- templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
-}
-
-
-void AddAccessor(Handle<FunctionTemplate> templ,
- Handle<Name> name,
- v8::AccessorNameGetterCallback getter,
- v8::AccessorNameSetterCallback setter) {
- templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
-}
-
-void AddInterceptor(Handle<FunctionTemplate> templ,
- v8::GenericNamedPropertyGetterCallback getter,
- v8::GenericNamedPropertySetterCallback setter) {
- templ->InstanceTemplate()->SetHandler(
- v8::NamedPropertyHandlerConfiguration(getter, setter));
-}
-
-
-THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
- child->Inherit(parent);
- AddAccessor(parent, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "child.age = 10;");
- ExpectBoolean("child.hasOwnProperty('age')", false);
- ExpectInt32("child.age", 10);
- ExpectInt32("child.accessor_age", 10);
-}
-
-
-THREADED_TEST(LegacyInterceptorDoesNotSeeSymbols) {
- LocalContext env;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
- v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
-
- child->Inherit(parent);
- AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
- AddInterceptor(child, StringInterceptorGetter, StringInterceptorSetter);
-
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- env->Global()->Set(v8_str("age"), age);
- CompileRun(
- "var child = new Child;"
- "child[age] = 10;");
- ExpectInt32("child[age]", 10);
- ExpectBoolean("child.hasOwnProperty('age')", false);
- ExpectBoolean("child.hasOwnProperty('accessor_age')", true);
-}
-
-
-THREADED_TEST(GenericInterceptorDoesSeeSymbols) {
- LocalContext env;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
- v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
- v8::Local<v8::Symbol> anon = v8::Symbol::New(isolate);
-
- child->Inherit(parent);
- AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
- AddInterceptor(child, GenericInterceptorGetter, GenericInterceptorSetter);
-
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- env->Global()->Set(v8_str("age"), age);
- env->Global()->Set(v8_str("anon"), anon);
- CompileRun(
- "var child = new Child;"
- "child[age] = 10;");
- ExpectInt32("child[age]", 10);
- ExpectInt32("child._sym_age", 10);
-
- // Check that it also sees strings.
- CompileRun("child.foo = 47");
- ExpectInt32("child.foo", 47);
- ExpectInt32("child._str_foo", 47);
-
- // Check that the interceptor can punt (in this case, on anonymous symbols).
- CompileRun("child[anon] = 31337");
- ExpectInt32("child[anon]", 31337);
-}
-
-
THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -2260,8 +1937,6 @@ THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
CHECK_EQ(i::FixedArray::cast(a->map()->instance_descriptors())->length(), 0);
// But we should still have an ExecutableAccessorInfo.
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::LookupResult lookup(i_isolate);
i::Handle<i::String> name(v8::Utils::OpenHandle(*v8_str("length")));
i::LookupIterator it(a, name, i::LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_EQ(i::LookupIterator::ACCESSOR, it.state());
@@ -2269,442 +1944,6 @@ THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
}
-THREADED_TEST(EmptyInterceptorBreakTransitions) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Constructor"), templ->GetFunction());
- CompileRun("var o1 = new Constructor;"
- "o1.a = 1;" // Ensure a and x share the descriptor array.
- "Object.defineProperty(o1, 'x', {value: 10});");
- CompileRun("var o2 = new Constructor;"
- "o2.a = 1;"
- "Object.defineProperty(o2, 'x', {value: 10});");
-}
-
-
-THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
- child->Inherit(parent);
- AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "var parent = child.__proto__;"
- "Object.defineProperty(parent, 'age', "
- " {get: function(){ return this.accessor_age; }, "
- " set: function(v){ this.accessor_age = v; }, "
- " enumerable: true, configurable: true});"
- "child.age = 10;");
- ExpectBoolean("child.hasOwnProperty('age')", false);
- ExpectInt32("child.age", 10);
- ExpectInt32("child.accessor_age", 10);
-}
-
-
-THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
- child->Inherit(parent);
- AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "var parent = child.__proto__;"
- "parent.name = 'Alice';");
- ExpectBoolean("child.hasOwnProperty('name')", false);
- ExpectString("child.name", "Alice");
- CompileRun("child.name = 'Bob';");
- ExpectString("child.name", "Bob");
- ExpectBoolean("child.hasOwnProperty('name')", true);
- ExpectString("parent.name", "Alice");
-}
-
-
-THREADED_TEST(SwitchFromInterceptorToAccessor) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddAccessor(templ, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- "function setAge(i){ obj.age = i; };"
- "for(var i = 0; i <= 10000; i++) setAge(i);");
- // All i < 10000 go to the interceptor.
- ExpectInt32("obj.interceptor_age", 9999);
- // The last i goes to the accessor.
- ExpectInt32("obj.accessor_age", 10000);
-}
-
-
-THREADED_TEST(SwitchFromAccessorToInterceptor) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddAccessor(templ, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- "function setAge(i){ obj.age = i; };"
- "for(var i = 20000; i >= 9999; i--) setAge(i);");
- // All i >= 10000 go to the accessor.
- ExpectInt32("obj.accessor_age", 10000);
- // The last i goes to the interceptor.
- ExpectInt32("obj.interceptor_age", 9999);
-}
-
-
-THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
- child->Inherit(parent);
- AddAccessor(parent, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 0; i <= 10000; i++) setAge(i);");
- // All i < 10000 go to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
- // The last i goes to the accessor.
- ExpectInt32("child.accessor_age", 10000);
-}
-
-
-THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
- child->Inherit(parent);
- AddAccessor(parent, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 20000; i >= 9999; i--) setAge(i);");
- // All i >= 10000 go to the accessor.
- ExpectInt32("child.accessor_age", 10000);
- // The last i goes to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
-}
-
-
-THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- "function setter(i) { this.accessor_age = i; };"
- "function getter() { return this.accessor_age; };"
- "function setAge(i) { obj.age = i; };"
- "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
- "for(var i = 0; i <= 10000; i++) setAge(i);");
- // All i < 10000 go to the interceptor.
- ExpectInt32("obj.interceptor_age", 9999);
- // The last i goes to the JavaScript accessor.
- ExpectInt32("obj.accessor_age", 10000);
- // The installed JavaScript getter is still intact.
- // This last part is a regression test for issue 1651 and relies on the fact
- // that both interceptor and accessor are being installed on the same object.
- ExpectInt32("obj.age", 10000);
- ExpectBoolean("obj.hasOwnProperty('age')", true);
- ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
-}
-
-
-THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- "function setter(i) { this.accessor_age = i; };"
- "function getter() { return this.accessor_age; };"
- "function setAge(i) { obj.age = i; };"
- "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
- "for(var i = 20000; i >= 9999; i--) setAge(i);");
- // All i >= 10000 go to the accessor.
- ExpectInt32("obj.accessor_age", 10000);
- // The last i goes to the interceptor.
- ExpectInt32("obj.interceptor_age", 9999);
- // The installed JavaScript getter is still intact.
- // This last part is a regression test for issue 1651 and relies on the fact
- // that both interceptor and accessor are being installed on the same object.
- ExpectInt32("obj.age", 10000);
- ExpectBoolean("obj.hasOwnProperty('age')", true);
- ExpectUndefined("Object.getOwnPropertyDescriptor(obj, 'age').value");
-}
-
-
-THREADED_TEST(SwitchFromInterceptorToProperty) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
- child->Inherit(parent);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 0; i <= 10000; i++) setAge(i);");
- // All i < 10000 go to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
- // The last i goes to child's own property.
- ExpectInt32("child.age", 10000);
-}
-
-
-THREADED_TEST(SwitchFromPropertyToInterceptor) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
- child->Inherit(parent);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 20000; i >= 9999; i--) setAge(i);");
- // All i >= 10000 go to child's own property.
- ExpectInt32("child.age", 10000);
- // The last i goes to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
-}
-
-
-THREADED_TEST(NamedPropertyHandlerGetter) {
- echo_named_call_count = 0;
- v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> templ =
- v8::FunctionTemplate::New(CcTest::isolate());
- templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- EchoNamedProperty, 0, 0, 0, 0, v8_str("data")));
- LocalContext env;
- env->Global()->Set(v8_str("obj"),
- templ->GetFunction()->NewInstance());
- CHECK_EQ(echo_named_call_count, 0);
- v8_compile("obj.x")->Run();
- CHECK_EQ(echo_named_call_count, 1);
- const char* code = "var str = 'oddle'; obj[str] + obj.poddle;";
- v8::Handle<Value> str = CompileRun(code);
- String::Utf8Value value(str);
- CHECK_EQ(*value, "oddlepoddle");
- // Check default behavior
- CHECK_EQ(v8_compile("obj.flob = 10;")->Run()->Int32Value(), 10);
- CHECK(v8_compile("'myProperty' in obj")->Run()->BooleanValue());
- CHECK(v8_compile("delete obj.myProperty")->Run()->BooleanValue());
-}
-
-
-int echo_indexed_call_count = 0;
-
-
-static void EchoIndexedProperty(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- CHECK_EQ(v8_num(637), info.Data());
- echo_indexed_call_count++;
- info.GetReturnValue().Set(v8_num(index));
-}
-
-
-THREADED_TEST(IndexedPropertyHandlerGetter) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
- templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- EchoIndexedProperty, 0, 0, 0, 0, v8_num(637)));
- LocalContext env;
- env->Global()->Set(v8_str("obj"),
- templ->GetFunction()->NewInstance());
- Local<Script> script = v8_compile("obj[900]");
- CHECK_EQ(script->Run()->Int32Value(), 900);
-}
-
-
-v8::Handle<v8::Object> bottom;
-
-static void CheckThisIndexedPropertyHandler(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyHandler));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-static void CheckThisNamedPropertyHandler(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyHandler));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-void CheckThisIndexedPropertySetter(
- uint32_t index,
- Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertySetter));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-
-void CheckThisNamedPropertySetter(
- Local<Name> property, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertySetter));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-void CheckThisIndexedPropertyQuery(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Integer>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyQuery));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-
-void CheckThisNamedPropertyQuery(
- Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyQuery));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-
-void CheckThisIndexedPropertyDeleter(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Boolean>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyDeleter));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-
-void CheckThisNamedPropertyDeleter(
- Local<Name> property, const v8::PropertyCallbackInfo<v8::Boolean>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyDeleter));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-
-void CheckThisIndexedPropertyEnumerator(
- const v8::PropertyCallbackInfo<v8::Array>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyEnumerator));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-
-void CheckThisNamedPropertyEnumerator(
- const v8::PropertyCallbackInfo<v8::Array>& info) {
- CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyEnumerator));
- ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
-}
-
-
-THREADED_PROFILED_TEST(PropertyHandlerInPrototype) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
-
- // Set up a prototype chain with three interceptors.
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
- templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- CheckThisIndexedPropertyHandler, CheckThisIndexedPropertySetter,
- CheckThisIndexedPropertyQuery, CheckThisIndexedPropertyDeleter,
- CheckThisIndexedPropertyEnumerator));
-
- templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- CheckThisNamedPropertyHandler, CheckThisNamedPropertySetter,
- CheckThisNamedPropertyQuery, CheckThisNamedPropertyDeleter,
- CheckThisNamedPropertyEnumerator));
-
- bottom = templ->GetFunction()->NewInstance();
- Local<v8::Object> top = templ->GetFunction()->NewInstance();
- Local<v8::Object> middle = templ->GetFunction()->NewInstance();
-
- bottom->SetPrototype(middle);
- middle->SetPrototype(top);
- env->Global()->Set(v8_str("obj"), bottom);
-
- // Indexed and named get.
- CompileRun("obj[0]");
- CompileRun("obj.x");
-
- // Indexed and named set.
- CompileRun("obj[1] = 42");
- CompileRun("obj.y = 42");
-
- // Indexed and named query.
- CompileRun("0 in obj");
- CompileRun("'x' in obj");
-
- // Indexed and named deleter.
- CompileRun("delete obj[0]");
- CompileRun("delete obj.x");
-
- // Enumerators.
- CompileRun("for (var p in obj) ;");
-}
-
-
-static void PrePropertyHandlerGet(
- Local<Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (v8_str("pre")->Equals(key)) {
- info.GetReturnValue().Set(v8_str("PrePropertyHandler: pre"));
- }
-}
-
-
-static void PrePropertyHandlerQuery(
- Local<Name> key, const v8::PropertyCallbackInfo<v8::Integer>& info) {
- if (v8_str("pre")->Equals(key)) {
- info.GetReturnValue().Set(static_cast<int32_t>(v8::None));
- }
-}
-
-
-THREADED_TEST(PrePropertyHandler) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
- desc->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
- PrePropertyHandlerGet, 0, PrePropertyHandlerQuery));
- LocalContext env(NULL, desc->InstanceTemplate());
- CompileRun("var pre = 'Object: pre'; var on = 'Object: on';");
- v8::Handle<Value> result_pre = CompileRun("pre");
- CHECK_EQ(v8_str("PrePropertyHandler: pre"), result_pre);
- v8::Handle<Value> result_on = CompileRun("on");
- CHECK_EQ(v8_str("Object: on"), result_on);
- v8::Handle<Value> result_post = CompileRun("post");
- CHECK(result_post.IsEmpty());
-}
-
-
THREADED_TEST(UndefinedIsNotEnumerable) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -2791,12 +2030,12 @@ THREADED_TEST(CallbackExceptionRegression) {
ThrowingPropertyHandlerGet, ThrowingPropertyHandlerSet));
LocalContext env;
env->Global()->Set(v8_str("obj"), obj->NewInstance());
- v8::Handle<Value> otto = CompileRun(
- "try { with (obj) { otto; } } catch (e) { e; }");
- CHECK_EQ(v8_str("otto"), otto);
- v8::Handle<Value> netto = CompileRun(
- "try { with (obj) { netto = 4; } } catch (e) { e; }");
- CHECK_EQ(v8_str("netto"), netto);
+ v8::Handle<Value> otto =
+ CompileRun("try { with (obj) { otto; } } catch (e) { e; }");
+ CHECK(v8_str("otto")->Equals(otto));
+ v8::Handle<Value> netto =
+ CompileRun("try { with (obj) { netto = 4; } } catch (e) { e; }");
+ CHECK(v8_str("netto")->Equals(netto));
}
@@ -2891,8 +2130,7 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
}
-static void CheckAlignedPointerInEmbedderData(LocalContext* env,
- int index,
+static void CheckAlignedPointerInEmbedderData(LocalContext* env, int index,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
(*env)->SetAlignedPointerInEmbedderData(index, value);
@@ -2933,8 +2171,7 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
}
-static void CheckEmbedderData(LocalContext* env,
- int index,
+static void CheckEmbedderData(LocalContext* env, int index,
v8::Handle<Value> data) {
(*env)->SetEmbedderData(index, data);
CHECK((*env)->GetEmbedderData(index)->StrictEquals(data));
@@ -2947,10 +2184,9 @@ THREADED_TEST(EmbedderData) {
v8::HandleScope scope(isolate);
CheckEmbedderData(
- &env, 3,
- v8::String::NewFromUtf8(isolate, "The quick brown fox jumps"));
- CheckEmbedderData(&env, 2, v8::String::NewFromUtf8(isolate,
- "over the lazy dog."));
+ &env, 3, v8::String::NewFromUtf8(isolate, "The quick brown fox jumps"));
+ CheckEmbedderData(&env, 2,
+ v8::String::NewFromUtf8(isolate, "over the lazy dog."));
CheckEmbedderData(&env, 1, v8::Number::New(isolate, 1.2345));
CheckEmbedderData(&env, 0, v8::Boolean::New(isolate, true));
}
@@ -3084,10 +2320,8 @@ THREADED_TEST(SymbolProperties) {
v8::Local<v8::Object> obj = v8::Object::New(isolate);
v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
- v8::Local<v8::Symbol> sym2 =
- v8::Symbol::New(isolate, v8_str("my-symbol"));
- v8::Local<v8::Symbol> sym3 =
- v8::Symbol::New(isolate, v8_str("sym3"));
+ v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, v8_str("my-symbol"));
+ v8::Local<v8::Symbol> sym3 = v8::Symbol::New(isolate, v8_str("sym3"));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -3134,11 +2368,11 @@ THREADED_TEST(SymbolProperties) {
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
CHECK_EQ(v8::None, obj->GetPropertyAttributes(sym1));
- CHECK_EQ(0, obj->GetOwnPropertyNames()->Length());
- int num_props = obj->GetPropertyNames()->Length();
+ CHECK_EQ(0u, obj->GetOwnPropertyNames()->Length());
+ unsigned num_props = obj->GetPropertyNames()->Length();
CHECK(obj->Set(v8::String::NewFromUtf8(isolate, "bla"),
v8::Integer::New(isolate, 20)));
- CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -3147,8 +2381,8 @@ THREADED_TEST(SymbolProperties) {
CHECK(obj->Get(sym3)->IsUndefined());
CHECK(obj->Set(sym3, v8::Integer::New(isolate, 42)));
CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
- CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
- v8::Integer::New(isolate, 42)));
+ CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))
+ ->Equals(v8::Integer::New(isolate, 42)));
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3156,7 +2390,7 @@ THREADED_TEST(SymbolProperties) {
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
CHECK_EQ(2008, obj->Get(sym2)->Int32Value());
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
- CHECK_EQ(2, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(2u, obj->GetOwnPropertyNames()->Length());
CHECK(obj->Has(sym1));
CHECK(obj->Has(sym2));
@@ -3169,9 +2403,9 @@ THREADED_TEST(SymbolProperties) {
CHECK(obj->Has(v8::String::NewFromUtf8(isolate, "accessor_sym3")));
CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
- CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
- v8::Integer::New(isolate, 42)));
- CHECK_EQ(2, obj->GetOwnPropertyNames()->Length());
+ CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))
+ ->Equals(v8::Integer::New(isolate, 42)));
+ CHECK_EQ(2u, obj->GetOwnPropertyNames()->Length());
// Symbol properties are inherited.
v8::Local<v8::Object> child = v8::Object::New(isolate);
@@ -3179,9 +2413,9 @@ THREADED_TEST(SymbolProperties) {
CHECK(child->Has(sym1));
CHECK_EQ(2002, child->Get(sym1)->Int32Value());
CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
- CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
- v8::Integer::New(isolate, 42)));
- CHECK_EQ(0, child->GetOwnPropertyNames()->Length());
+ CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))
+ ->Equals(v8::Integer::New(isolate, 42)));
+ CHECK_EQ(0u, child->GetOwnPropertyNames()->Length());
}
@@ -3224,11 +2458,11 @@ THREADED_TEST(PrivateProperties) {
CHECK(obj->HasPrivate(priv1));
CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
- CHECK_EQ(0, obj->GetOwnPropertyNames()->Length());
- int num_props = obj->GetPropertyNames()->Length();
+ CHECK_EQ(0u, obj->GetOwnPropertyNames()->Length());
+ unsigned num_props = obj->GetPropertyNames()->Length();
CHECK(obj->Set(v8::String::NewFromUtf8(isolate, "bla"),
v8::Integer::New(isolate, 20)));
- CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -3239,7 +2473,7 @@ THREADED_TEST(PrivateProperties) {
CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
CHECK_EQ(2008, obj->GetPrivate(priv2)->Int32Value());
CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
- CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
CHECK(obj->HasPrivate(priv1));
CHECK(obj->HasPrivate(priv2));
@@ -3247,14 +2481,14 @@ THREADED_TEST(PrivateProperties) {
CHECK(obj->HasPrivate(priv1));
CHECK(!obj->HasPrivate(priv2));
CHECK_EQ(2002, obj->GetPrivate(priv1)->Int32Value());
- CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
// Private properties are inherited (for the time being).
v8::Local<v8::Object> child = v8::Object::New(isolate);
child->SetPrototype(obj);
CHECK(child->HasPrivate(priv1));
CHECK_EQ(2002, child->GetPrivate(priv1)->Int32Value());
- CHECK_EQ(0, child->GetOwnPropertyNames()->Length());
+ CHECK_EQ(0u, child->GetOwnPropertyNames()->Length());
}
@@ -3330,12 +2564,12 @@ THREADED_TEST(GlobalPrivates) {
class ScopedArrayBufferContents {
public:
- explicit ScopedArrayBufferContents(
- const v8::ArrayBuffer::Contents& contents)
- : contents_(contents) {}
+ explicit ScopedArrayBufferContents(const v8::ArrayBuffer::Contents& contents)
+ : contents_(contents) {}
~ScopedArrayBufferContents() { free(contents_.Data()); }
void* Data() const { return contents_.Data(); }
size_t ByteLength() const { return contents_.ByteLength(); }
+
private:
const v8::ArrayBuffer::Contents contents_;
};
@@ -3371,10 +2605,11 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
v8::Handle<v8::Value> result = CompileRun("ab.byteLength");
CHECK_EQ(1024, result->Int32Value());
- result = CompileRun("var u8 = new Uint8Array(ab);"
- "u8[0] = 0xFF;"
- "u8[1] = 0xAA;"
- "u8.length");
+ result = CompileRun(
+ "var u8 = new Uint8Array(ab);"
+ "u8[0] = 0xFF;"
+ "u8[1] = 0xAA;"
+ "u8.length");
CHECK_EQ(1024, result->Int32Value());
CHECK_EQ(0xFF, data[0]);
CHECK_EQ(0xAA, data[1]);
@@ -3391,11 +2626,11 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Value> result =
- CompileRun("var ab1 = new ArrayBuffer(2);"
- "var u8_a = new Uint8Array(ab1);"
- "u8_a[0] = 0xAA;"
- "u8_a[1] = 0xFF; u8_a.buffer");
+ v8::Local<v8::Value> result = CompileRun(
+ "var ab1 = new ArrayBuffer(2);"
+ "var u8_a = new Uint8Array(ab1);"
+ "u8_a[0] = 0xAA;"
+ "u8_a[1] = 0xFF; u8_a.buffer");
Local<v8::ArrayBuffer> ab1 = Local<v8::ArrayBuffer>::Cast(result);
CheckInternalFieldsAreZero(ab1);
CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
@@ -3409,9 +2644,10 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
CHECK_EQ(0xAA, result->Int32Value());
result = CompileRun("u8_a[1]");
CHECK_EQ(0xFF, result->Int32Value());
- result = CompileRun("var u8_b = new Uint8Array(ab1);"
- "u8_b[0] = 0xBB;"
- "u8_a[0]");
+ result = CompileRun(
+ "var u8_b = new Uint8Array(ab1);"
+ "u8_b[0] = 0xBB;"
+ "u8_a[0]");
CHECK_EQ(0xBB, result->Int32Value());
result = CompileRun("u8_b[1]");
CHECK_EQ(0xFF, result->Int32Value());
@@ -3445,10 +2681,11 @@ THREADED_TEST(ArrayBuffer_External) {
v8::Handle<v8::Value> result = CompileRun("ab3.byteLength");
CHECK_EQ(100, result->Int32Value());
- result = CompileRun("var u8_b = new Uint8Array(ab3);"
- "u8_b[0] = 0xBB;"
- "u8_b[1] = 0xCC;"
- "u8_b.length");
+ result = CompileRun(
+ "var u8_b = new Uint8Array(ab3);"
+ "u8_b[0] = 0xBB;"
+ "u8_b[1] = 0xCC;"
+ "u8_b.length");
CHECK_EQ(100, result->Int32Value());
CHECK_EQ(0xBB, my_data[0]);
CHECK_EQ(0xCC, my_data[1]);
@@ -3493,19 +2730,18 @@ static void CheckIsNeutered(v8::Handle<v8::TypedArray> ta) {
static void CheckIsTypedArrayVarNeutered(const char* name) {
i::ScopedVector<char> source(1024);
i::SNPrintF(source,
- "%s.byteLength == 0 && %s.byteOffset == 0 && %s.length == 0",
- name, name, name);
+ "%s.byteLength == 0 && %s.byteOffset == 0 && %s.length == 0",
+ name, name, name);
CHECK(CompileRun(source.start())->IsTrue());
v8::Handle<v8::TypedArray> ta =
- v8::Handle<v8::TypedArray>::Cast(CompileRun(name));
+ v8::Handle<v8::TypedArray>::Cast(CompileRun(name));
CheckIsNeutered(ta);
}
template <typename TypedArray, int kElementSize>
static Handle<TypedArray> CreateAndCheck(Handle<v8::ArrayBuffer> ab,
- int byteOffset,
- int length) {
+ int byteOffset, int length) {
v8::Handle<TypedArray> ta = TypedArray::New(ab, byteOffset, length);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
CHECK_EQ(byteOffset, static_cast<int>(ta->ByteOffset()));
@@ -3523,26 +2759,26 @@ THREADED_TEST(ArrayBuffer_NeuteringApi) {
v8::Handle<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
v8::Handle<v8::Uint8Array> u8a =
- CreateAndCheck<v8::Uint8Array, 1>(buffer, 1, 1023);
+ CreateAndCheck<v8::Uint8Array, 1>(buffer, 1, 1023);
v8::Handle<v8::Uint8ClampedArray> u8c =
- CreateAndCheck<v8::Uint8ClampedArray, 1>(buffer, 1, 1023);
+ CreateAndCheck<v8::Uint8ClampedArray, 1>(buffer, 1, 1023);
v8::Handle<v8::Int8Array> i8a =
- CreateAndCheck<v8::Int8Array, 1>(buffer, 1, 1023);
+ CreateAndCheck<v8::Int8Array, 1>(buffer, 1, 1023);
v8::Handle<v8::Uint16Array> u16a =
- CreateAndCheck<v8::Uint16Array, 2>(buffer, 2, 511);
+ CreateAndCheck<v8::Uint16Array, 2>(buffer, 2, 511);
v8::Handle<v8::Int16Array> i16a =
- CreateAndCheck<v8::Int16Array, 2>(buffer, 2, 511);
+ CreateAndCheck<v8::Int16Array, 2>(buffer, 2, 511);
v8::Handle<v8::Uint32Array> u32a =
- CreateAndCheck<v8::Uint32Array, 4>(buffer, 4, 255);
+ CreateAndCheck<v8::Uint32Array, 4>(buffer, 4, 255);
v8::Handle<v8::Int32Array> i32a =
- CreateAndCheck<v8::Int32Array, 4>(buffer, 4, 255);
+ CreateAndCheck<v8::Int32Array, 4>(buffer, 4, 255);
v8::Handle<v8::Float32Array> f32a =
- CreateAndCheck<v8::Float32Array, 4>(buffer, 4, 255);
+ CreateAndCheck<v8::Float32Array, 4>(buffer, 4, 255);
v8::Handle<v8::Float64Array> f64a =
- CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
+ CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
v8::Handle<v8::DataView> dv = v8::DataView::New(buffer, 1, 1023);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
@@ -3587,7 +2823,7 @@ THREADED_TEST(ArrayBuffer_NeuteringScript) {
Local<v8::ArrayBuffer>::Cast(CompileRun("ab"));
v8::Handle<v8::DataView> dv =
- v8::Handle<v8::DataView>::Cast(CompileRun("dv"));
+ v8::Handle<v8::DataView>::Cast(CompileRun("dv"));
ScopedArrayBufferContents contents(ab->Externalize());
ab->Neuter();
@@ -3609,7 +2845,6 @@ THREADED_TEST(ArrayBuffer_NeuteringScript) {
}
-
THREADED_TEST(HiddenProperties) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -3691,35 +2926,6 @@ THREADED_TEST(Regress97784) {
}
-static bool interceptor_for_hidden_properties_called;
-static void InterceptorForHiddenProperties(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- interceptor_for_hidden_properties_called = true;
-}
-
-
-THREADED_TEST(HiddenPropertiesWithInterceptors) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
-
- interceptor_for_hidden_properties_called = false;
-
- v8::Local<v8::String> key = v8_str("api-test::hidden-key");
-
- // Associate an interceptor with an object and start setting hidden values.
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
- Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
- instance_templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorForHiddenProperties));
- Local<v8::Function> function = fun_templ->GetFunction();
- Local<v8::Object> obj = function->NewInstance();
- CHECK(obj->SetHiddenValue(key, v8::Integer::New(isolate, 2302)));
- CHECK_EQ(2302, obj->GetHiddenValue(key)->Int32Value());
- CHECK(!interceptor_for_hidden_properties_called);
-}
-
-
THREADED_TEST(External) {
v8::HandleScope scope(CcTest::isolate());
int x = 3;
@@ -3828,15 +3034,15 @@ THREADED_TEST(ResettingGlobalHandleToEmpty) {
}
-template<class T>
+template <class T>
static v8::UniquePersistent<T> PassUnique(v8::UniquePersistent<T> unique) {
return unique.Pass();
}
-template<class T>
+template <class T>
static v8::UniquePersistent<T> ReturnUnique(v8::Isolate* isolate,
- const v8::Persistent<T> & global) {
+ const v8::Persistent<T>& global) {
v8::UniquePersistent<String> unique(isolate, global);
return unique.Pass();
}
@@ -3895,18 +3101,17 @@ THREADED_TEST(UniquePersistent) {
}
-template<typename K, typename V>
+template <typename K, typename V>
class WeakStdMapTraits : public v8::StdMapTraits<K, V> {
public:
- typedef typename v8::PersistentValueMap<K, V, WeakStdMapTraits<K, V> >
- MapType;
+ typedef typename v8::PersistentValueMap<K, V, WeakStdMapTraits<K, V>> MapType;
static const v8::PersistentContainerCallbackType kCallbackType = v8::kWeak;
struct WeakCallbackDataType {
MapType* map;
K key;
};
- static WeakCallbackDataType* WeakCallbackParameter(
- MapType* map, const K& key, Local<V> value) {
+ static WeakCallbackDataType* WeakCallbackParameter(MapType* map, const K& key,
+ Local<V> value) {
WeakCallbackDataType* data = new WeakCallbackDataType;
data->map = map;
data->key = key;
@@ -3920,15 +3125,13 @@ class WeakStdMapTraits : public v8::StdMapTraits<K, V> {
const v8::WeakCallbackData<V, WeakCallbackDataType>& data) {
return data.GetParameter()->key;
}
- static void DisposeCallbackData(WeakCallbackDataType* data) {
- delete data;
- }
+ static void DisposeCallbackData(WeakCallbackDataType* data) { delete data; }
static void Dispose(v8::Isolate* isolate, v8::UniquePersistent<V> value,
- K key) { }
+ K key) {}
};
-template<typename Map>
+template <typename Map>
static void TestPersistentValueMap() {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -3945,10 +3148,10 @@ static void TestPersistentValueMap() {
map.Set(7, expected);
CHECK_EQ(1, static_cast<int>(map.Size()));
obj = map.Get(7);
- CHECK_EQ(expected, obj);
+ CHECK(expected->Equals(obj));
{
typename Map::PersistentValueReference ref = map.GetReference(7);
- CHECK_EQ(expected, ref.NewLocal(isolate));
+ CHECK(expected->Equals(ref.NewLocal(isolate)));
}
v8::UniquePersistent<v8::Object> removed = map.Remove(7);
CHECK_EQ(0, static_cast<int>(map.Size()));
@@ -3962,17 +3165,18 @@ static void TestPersistentValueMap() {
{
typename Map::PersistentValueReference ref;
Local<v8::Object> expected2 = v8::Object::New(isolate);
- removed = map.Set(8,
- v8::UniquePersistent<v8::Object>(isolate, expected2), &ref);
+ removed = map.Set(8, v8::UniquePersistent<v8::Object>(isolate, expected2),
+ &ref);
CHECK_EQ(1, static_cast<int>(map.Size()));
CHECK(expected == removed);
- CHECK_EQ(expected2, ref.NewLocal(isolate));
+ CHECK(expected2->Equals(ref.NewLocal(isolate)));
}
}
CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
if (map.IsWeak()) {
- reinterpret_cast<v8::internal::Isolate*>(isolate)->heap()->
- CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ reinterpret_cast<v8::internal::Isolate*>(isolate)
+ ->heap()
+ ->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
} else {
map.Clear();
}
@@ -3983,11 +3187,12 @@ static void TestPersistentValueMap() {
TEST(PersistentValueMap) {
// Default case, w/o weak callbacks:
- TestPersistentValueMap<v8::StdPersistentValueMap<int, v8::Object> >();
+ TestPersistentValueMap<v8::StdPersistentValueMap<int, v8::Object>>();
// Custom traits with weak callbacks:
typedef v8::PersistentValueMap<int, v8::Object,
- WeakStdMapTraits<int, v8::Object> > WeakPersistentValueMap;
+ WeakStdMapTraits<int, v8::Object>>
+ WeakPersistentValueMap;
TestPersistentValueMap<WeakPersistentValueMap>();
}
@@ -4021,10 +3226,10 @@ TEST(PersistentValueVector) {
CHECK(!vector.IsEmpty());
CHECK_EQ(5, static_cast<int>(vector.Size()));
CHECK(obj3.IsEmpty());
- CHECK_EQ(obj1, vector.Get(0));
- CHECK_EQ(obj1, vector.Get(2));
- CHECK_EQ(obj1, vector.Get(4));
- CHECK_EQ(obj2, vector.Get(1));
+ CHECK(obj1->Equals(vector.Get(0)));
+ CHECK(obj1->Equals(vector.Get(2)));
+ CHECK(obj1->Equals(vector.Get(4)));
+ CHECK(obj2->Equals(vector.Get(1)));
CHECK_EQ(5 + handle_count, global_handles->global_handles_count());
@@ -4096,17 +3301,18 @@ THREADED_TEST(LocalHandle) {
class WeakCallCounter {
public:
- explicit WeakCallCounter(int id) : id_(id), number_of_weak_calls_(0) { }
+ explicit WeakCallCounter(int id) : id_(id), number_of_weak_calls_(0) {}
int id() { return id_; }
void increment() { number_of_weak_calls_++; }
int NumberOfWeakCalls() { return number_of_weak_calls_; }
+
private:
int id_;
int number_of_weak_calls_;
};
-template<typename T>
+template <typename T>
struct WeakCallCounterAndPersistent {
explicit WeakCallCounterAndPersistent(WeakCallCounter* counter)
: counter(counter) {}
@@ -4117,14 +3323,14 @@ struct WeakCallCounterAndPersistent {
template <typename T>
static void WeakPointerCallback(
- const v8::WeakCallbackData<T, WeakCallCounterAndPersistent<T> >& data) {
+ const v8::WeakCallbackData<T, WeakCallCounterAndPersistent<T>>& data) {
CHECK_EQ(1234, data.GetParameter()->counter->id());
data.GetParameter()->counter->increment();
data.GetParameter()->handle.Reset();
}
-template<typename T>
+template <typename T>
static UniqueId MakeUniqueId(const Persistent<T>& p) {
return UniqueId(reinterpret_cast<uintptr_t>(*v8::Utils::OpenPersistent(p)));
}
@@ -4167,10 +3373,10 @@ THREADED_TEST(ApiObjectGroups) {
// Connect group 1 and 2, make a cycle.
{
HandleScope scope(iso);
- CHECK(Local<Object>::New(iso, g1s2.handle.As<Object>())->
- Set(0, Local<Value>::New(iso, g2s2.handle)));
- CHECK(Local<Object>::New(iso, g2s1.handle.As<Object>())->
- Set(0, Local<Value>::New(iso, g1s1.handle)));
+ CHECK(Local<Object>::New(iso, g1s2.handle.As<Object>())
+ ->Set(0, Local<Value>::New(iso, g2s2.handle)));
+ CHECK(Local<Object>::New(iso, g2s1.handle.As<Object>())
+ ->Set(0, Local<Value>::New(iso, g1s1.handle)));
}
{
@@ -4184,8 +3390,8 @@ THREADED_TEST(ApiObjectGroups) {
iso->SetReferenceFromGroup(id2, g2c1.handle);
}
// Do a single full GC, ensure incremental marking is stopped.
- v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
- iso)->heap();
+ v8::internal::Heap* heap =
+ reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
// All object should be alive.
@@ -4278,8 +3484,8 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
iso->SetReferenceFromGroup(id2, g2c1.handle);
}
// Do a single full GC, ensure incremental marking is stopped.
- v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
- iso)->heap();
+ v8::internal::Heap* heap =
+ reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
// All object should be alive.
@@ -4390,8 +3596,8 @@ THREADED_TEST(ApiObjectGroupsCycle) {
iso->SetReferenceFromGroup(id4, g1s1.handle);
}
// Do a single full GC
- v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
- iso)->heap();
+ v8::internal::Heap* heap =
+ reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
// All object should be alive.
@@ -4442,9 +3648,9 @@ THREADED_TEST(WeakRootsSurviveTwoRoundsOfGC) {
weak_obj.handle.Reset(iso, Object::New(iso));
weak_obj.handle.SetWeak(&weak_obj, &WeakPointerCallback);
CHECK(weak_obj.handle.IsWeak());
- Local<Object>::New(iso, weak_obj.handle.As<Object>())->Set(
- v8_str("x"),
- String::NewFromUtf8(iso, "magic cookie", String::kInternalizedString));
+ Local<Object>::New(iso, weak_obj.handle.As<Object>())
+ ->Set(v8_str("x"), String::NewFromUtf8(iso, "magic cookie",
+ String::kInternalizedString));
}
// Do a single full GC
i::Isolate* i_iso = reinterpret_cast<v8::internal::Isolate*>(iso);
@@ -4520,20 +3726,20 @@ TEST(ApiObjectGroupsCycleForScavenger) {
g3s2.handle.MarkPartiallyDependent();
iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
- Local<Object>::New(iso, g1s1.handle.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g2s1.handle));
+ Local<Object>::New(iso, g1s1.handle.As<Object>())
+ ->Set(v8_str("x"), Local<Value>::New(iso, g2s1.handle));
iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
- Local<Object>::New(iso, g2s1.handle.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g3s1.handle));
+ Local<Object>::New(iso, g2s1.handle.As<Object>())
+ ->Set(v8_str("x"), Local<Value>::New(iso, g3s1.handle));
iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
- Local<Object>::New(iso, g3s1.handle.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g1s1.handle));
+ Local<Object>::New(iso, g3s1.handle.As<Object>())
+ ->Set(v8_str("x"), Local<Value>::New(iso, g1s1.handle));
}
- v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
- iso)->heap();
+ v8::internal::Heap* heap =
+ reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
// All objects should be alive.
@@ -4554,16 +3760,16 @@ TEST(ApiObjectGroupsCycleForScavenger) {
g3s2.handle.MarkPartiallyDependent();
iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
- Local<Object>::New(iso, g1s1.handle.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g2s1.handle));
+ Local<Object>::New(iso, g1s1.handle.As<Object>())
+ ->Set(v8_str("x"), Local<Value>::New(iso, g2s1.handle));
iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
- Local<Object>::New(iso, g2s1.handle.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g3s1.handle));
+ Local<Object>::New(iso, g2s1.handle.As<Object>())
+ ->Set(v8_str("x"), Local<Value>::New(iso, g3s1.handle));
iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
- Local<Object>::New(iso, g3s1.handle.As<Object>())->Set(
- v8_str("x"), Local<Value>::New(iso, g1s1.handle));
+ Local<Object>::New(iso, g3s1.handle.As<Object>())
+ ->Set(v8_str("x"), Local<Value>::New(iso, g1s1.handle));
}
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -4582,7 +3788,7 @@ THREADED_TEST(ScriptException) {
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(*exception_value, "panama!");
+ CHECK_EQ(0, strcmp(*exception_value, "panama!"));
}
@@ -4591,11 +3797,14 @@ TEST(TryCatchCustomException) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
v8::TryCatch try_catch;
- CompileRun("function CustomError() { this.a = 'b'; }"
- "(function f() { throw new CustomError(); })();");
+ CompileRun(
+ "function CustomError() { this.a = 'b'; }"
+ "(function f() { throw new CustomError(); })();");
CHECK(try_catch.HasCaught());
- CHECK(try_catch.Exception()->ToObject(isolate)->Get(v8_str("a"))->Equals(
- v8_str("b")));
+ CHECK(try_catch.Exception()
+ ->ToObject(isolate)
+ ->Get(v8_str("a"))
+ ->Equals(v8_str("b")));
}
@@ -4666,8 +3875,8 @@ TEST(MessageHandler2) {
v8::V8::AddMessageListener(check_message_2);
LocalContext context;
v8::Local<v8::Value> error = v8::Exception::Error(v8_str("custom error"));
- v8::Object::Cast(*error)->SetHiddenValue(v8_str("hidden key"),
- v8_str("hidden value"));
+ v8::Object::Cast(*error)
+ ->SetHiddenValue(v8_str("hidden key"), v8_str("hidden value"));
context->Global()->Set(v8_str("error"), error);
CompileRun("throw error;");
CHECK(message_received);
@@ -4679,6 +3888,8 @@ TEST(MessageHandler2) {
static void check_message_3(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
CHECK(message->IsSharedCrossOrigin());
+ CHECK(message->GetScriptOrigin().ResourceIsSharedCrossOrigin()->Value());
+ CHECK(message->GetScriptOrigin().ResourceIsEmbedderDebugScript()->Value());
CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
message_received = true;
}
@@ -4692,12 +3903,11 @@ TEST(MessageHandler3) {
v8::V8::AddMessageListener(check_message_3);
LocalContext context;
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(isolate, 1),
- v8::Integer::New(isolate, 2),
- v8::True(isolate));
- v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
- &origin);
+ v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2), v8::True(isolate),
+ Handle<v8::Integer>(), v8::True(isolate));
+ v8::Handle<v8::Script> script =
+ Script::Compile(v8_str("throw 'error'"), &origin);
script->Run();
CHECK(message_received);
// clear out the message listener
@@ -4721,12 +3931,10 @@ TEST(MessageHandler4) {
v8::V8::AddMessageListener(check_message_4);
LocalContext context;
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(isolate, 1),
- v8::Integer::New(isolate, 2),
- v8::False(isolate));
- v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
- &origin);
+ v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2), v8::False(isolate));
+ v8::Handle<v8::Script> script =
+ Script::Compile(v8_str("throw 'error'"), &origin);
script->Run();
CHECK(message_received);
// clear out the message listener
@@ -4735,7 +3943,7 @@ TEST(MessageHandler4) {
static void check_message_5a(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+ v8::Handle<Value> data) {
CHECK(message->IsSharedCrossOrigin());
CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
message_received = true;
@@ -4743,7 +3951,7 @@ static void check_message_5a(v8::Handle<v8::Message> message,
static void check_message_5b(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+ v8::Handle<Value> data) {
CHECK(!message->IsSharedCrossOrigin());
CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
message_received = true;
@@ -4758,12 +3966,10 @@ TEST(MessageHandler5) {
v8::V8::AddMessageListener(check_message_5a);
LocalContext context;
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(isolate, 1),
- v8::Integer::New(isolate, 2),
- v8::True(isolate));
- v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
- &origin);
+ v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2), v8::True(isolate));
+ v8::Handle<v8::Script> script =
+ Script::Compile(v8_str("throw 'error'"), &origin);
script->Run();
CHECK(message_received);
// clear out the message listener
@@ -4771,13 +3977,9 @@ TEST(MessageHandler5) {
message_received = false;
v8::V8::AddMessageListener(check_message_5b);
- origin =
- v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(isolate, 1),
- v8::Integer::New(isolate, 2),
- v8::False(isolate));
- script = Script::Compile(v8_str("throw 'error'"),
- &origin);
+ origin = v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2), v8::False(isolate));
+ script = Script::Compile(v8_str("throw 'error'"), &origin);
script->Run();
CHECK(message_received);
// clear out the message listener
@@ -4785,6 +3987,77 @@ TEST(MessageHandler5) {
}
+TEST(NativeWeakMap) {
+ v8::Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ Local<v8::NativeWeakMap> weak_map(v8::NativeWeakMap::New(isolate));
+ CHECK(!weak_map.IsEmpty());
+
+ LocalContext env;
+ Local<Object> value = v8::Object::New(isolate);
+
+ Local<Object> local1 = v8::Object::New(isolate);
+ CHECK(!weak_map->Has(local1));
+ CHECK(weak_map->Get(local1)->IsUndefined());
+ weak_map->Set(local1, value);
+ CHECK(weak_map->Has(local1));
+ CHECK(value->Equals(weak_map->Get(local1)));
+
+ WeakCallCounter counter(1234);
+ WeakCallCounterAndPersistent<Value> o1(&counter);
+ WeakCallCounterAndPersistent<Value> o2(&counter);
+ WeakCallCounterAndPersistent<Value> s1(&counter);
+ {
+ HandleScope scope(isolate);
+ Local<v8::Object> obj1 = v8::Object::New(isolate);
+ Local<v8::Object> obj2 = v8::Object::New(isolate);
+ Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
+
+ weak_map->Set(obj1, value);
+ weak_map->Set(obj2, value);
+ weak_map->Set(sym1, value);
+
+ o1.handle.Reset(isolate, obj1);
+ o2.handle.Reset(isolate, obj2);
+ s1.handle.Reset(isolate, sym1);
+
+ CHECK(weak_map->Has(local1));
+ CHECK(weak_map->Has(obj1));
+ CHECK(weak_map->Has(obj2));
+ CHECK(weak_map->Has(sym1));
+
+ CHECK(value->Equals(weak_map->Get(local1)));
+ CHECK(value->Equals(weak_map->Get(obj1)));
+ CHECK(value->Equals(weak_map->Get(obj2)));
+ CHECK(value->Equals(weak_map->Get(sym1)));
+ }
+ CcTest::heap()->CollectAllGarbage(TestHeap::Heap::kNoGCFlags);
+ {
+ HandleScope scope(isolate);
+ CHECK(value->Equals(weak_map->Get(local1)));
+ CHECK(value->Equals(weak_map->Get(Local<Value>::New(isolate, o1.handle))));
+ CHECK(value->Equals(weak_map->Get(Local<Value>::New(isolate, o2.handle))));
+ CHECK(value->Equals(weak_map->Get(Local<Value>::New(isolate, s1.handle))));
+ }
+
+ o1.handle.SetWeak(&o1, &WeakPointerCallback);
+ o2.handle.SetWeak(&o2, &WeakPointerCallback);
+ s1.handle.SetWeak(&s1, &WeakPointerCallback);
+
+ CcTest::heap()->CollectAllGarbage(TestHeap::Heap::kNoGCFlags);
+ CHECK_EQ(3, counter.NumberOfWeakCalls());
+
+ CHECK(o1.handle.IsEmpty());
+ CHECK(o2.handle.IsEmpty());
+ CHECK(s1.handle.IsEmpty());
+
+ CHECK(value->Equals(weak_map->Get(local1)));
+ CHECK(weak_map->Delete(local1));
+ CHECK(!weak_map->Has(local1));
+ CHECK(weak_map->Get(local1)->IsUndefined());
+}
+
+
THREADED_TEST(GetSetProperty) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -4855,7 +4128,7 @@ THREADED_TEST(PropertyAttributes) {
CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(exception));
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ("exception", *exception_value);
+ CHECK_EQ(0, strcmp("exception", *exception_value));
try_catch.Reset();
}
@@ -4864,27 +4137,27 @@ THREADED_TEST(Array) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Local<v8::Array> array = v8::Array::New(context->GetIsolate());
- CHECK_EQ(0, array->Length());
+ CHECK_EQ(0u, array->Length());
CHECK(array->Get(0)->IsUndefined());
CHECK(!array->Has(0));
CHECK(array->Get(100)->IsUndefined());
CHECK(!array->Has(100));
array->Set(2, v8_num(7));
- CHECK_EQ(3, array->Length());
+ CHECK_EQ(3u, array->Length());
CHECK(!array->Has(0));
CHECK(!array->Has(1));
CHECK(array->Has(2));
CHECK_EQ(7, array->Get(2)->Int32Value());
Local<Value> obj = CompileRun("[1, 2, 3]");
Local<v8::Array> arr = obj.As<v8::Array>();
- CHECK_EQ(3, arr->Length());
+ CHECK_EQ(3u, arr->Length());
CHECK_EQ(1, arr->Get(0)->Int32Value());
CHECK_EQ(2, arr->Get(1)->Int32Value());
CHECK_EQ(3, arr->Get(2)->Int32Value());
array = v8::Array::New(context->GetIsolate(), 27);
- CHECK_EQ(27, array->Length());
+ CHECK_EQ(27u, array->Length());
array = v8::Array::New(context->GetIsolate(), -27);
- CHECK_EQ(0, array->Length());
+ CHECK_EQ(0u, array->Length());
}
@@ -4892,8 +4165,7 @@ void HandleF(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::EscapableHandleScope scope(args.GetIsolate());
ApiTestFuzzer::Fuzz();
Local<v8::Array> result = v8::Array::New(args.GetIsolate(), args.Length());
- for (int i = 0; i < args.Length(); i++)
- result->Set(i, args[i]);
+ for (int i = 0; i < args.Length(); i++) result->Set(i, args[i]);
args.GetReturnValue().Set(scope.Escape(result));
}
@@ -4907,29 +4179,29 @@ THREADED_TEST(Vector) {
const char* fun = "f()";
Local<v8::Array> a0 = CompileRun(fun).As<v8::Array>();
- CHECK_EQ(0, a0->Length());
+ CHECK_EQ(0u, a0->Length());
const char* fun2 = "f(11)";
Local<v8::Array> a1 = CompileRun(fun2).As<v8::Array>();
- CHECK_EQ(1, a1->Length());
+ CHECK_EQ(1u, a1->Length());
CHECK_EQ(11, a1->Get(0)->Int32Value());
const char* fun3 = "f(12, 13)";
Local<v8::Array> a2 = CompileRun(fun3).As<v8::Array>();
- CHECK_EQ(2, a2->Length());
+ CHECK_EQ(2u, a2->Length());
CHECK_EQ(12, a2->Get(0)->Int32Value());
CHECK_EQ(13, a2->Get(1)->Int32Value());
const char* fun4 = "f(14, 15, 16)";
Local<v8::Array> a3 = CompileRun(fun4).As<v8::Array>();
- CHECK_EQ(3, a3->Length());
+ CHECK_EQ(3u, a3->Length());
CHECK_EQ(14, a3->Get(0)->Int32Value());
CHECK_EQ(15, a3->Get(1)->Int32Value());
CHECK_EQ(16, a3->Get(2)->Int32Value());
const char* fun5 = "f(17, 18, 19, 20)";
Local<v8::Array> a4 = CompileRun(fun5).As<v8::Array>();
- CHECK_EQ(4, a4->Length());
+ CHECK_EQ(4u, a4->Length());
CHECK_EQ(17, a4->Get(0)->Int32Value());
CHECK_EQ(18, a4->Get(1)->Int32Value());
CHECK_EQ(19, a4->Get(2)->Int32Value());
@@ -4942,20 +4214,20 @@ THREADED_TEST(FunctionCall) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
CompileRun(
- "function Foo() {"
- " var result = [];"
- " for (var i = 0; i < arguments.length; i++) {"
- " result.push(arguments[i]);"
- " }"
- " return result;"
- "}"
- "function ReturnThisSloppy() {"
- " return this;"
- "}"
- "function ReturnThisStrict() {"
- " 'use strict';"
- " return this;"
- "}");
+ "function Foo() {"
+ " var result = [];"
+ " for (var i = 0; i < arguments.length; i++) {"
+ " result.push(arguments[i]);"
+ " }"
+ " return result;"
+ "}"
+ "function ReturnThisSloppy() {"
+ " return this;"
+ "}"
+ "function ReturnThisStrict() {"
+ " 'use strict';"
+ " return this;"
+ "}");
Local<Function> Foo =
Local<Function>::Cast(context->Global()->Get(v8_str("Foo")));
Local<Function> ReturnThisSloppy =
@@ -4965,35 +4237,30 @@ THREADED_TEST(FunctionCall) {
v8::Handle<Value>* args0 = NULL;
Local<v8::Array> a0 = Local<v8::Array>::Cast(Foo->Call(Foo, 0, args0));
- CHECK_EQ(0, a0->Length());
+ CHECK_EQ(0u, a0->Length());
- v8::Handle<Value> args1[] = { v8_num(1.1) };
+ v8::Handle<Value> args1[] = {v8_num(1.1)};
Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->Call(Foo, 1, args1));
- CHECK_EQ(1, a1->Length());
+ CHECK_EQ(1u, a1->Length());
CHECK_EQ(1.1, a1->Get(v8::Integer::New(isolate, 0))->NumberValue());
- v8::Handle<Value> args2[] = { v8_num(2.2),
- v8_num(3.3) };
+ v8::Handle<Value> args2[] = {v8_num(2.2), v8_num(3.3)};
Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->Call(Foo, 2, args2));
- CHECK_EQ(2, a2->Length());
+ CHECK_EQ(2u, a2->Length());
CHECK_EQ(2.2, a2->Get(v8::Integer::New(isolate, 0))->NumberValue());
CHECK_EQ(3.3, a2->Get(v8::Integer::New(isolate, 1))->NumberValue());
- v8::Handle<Value> args3[] = { v8_num(4.4),
- v8_num(5.5),
- v8_num(6.6) };
+ v8::Handle<Value> args3[] = {v8_num(4.4), v8_num(5.5), v8_num(6.6)};
Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->Call(Foo, 3, args3));
- CHECK_EQ(3, a3->Length());
+ CHECK_EQ(3u, a3->Length());
CHECK_EQ(4.4, a3->Get(v8::Integer::New(isolate, 0))->NumberValue());
CHECK_EQ(5.5, a3->Get(v8::Integer::New(isolate, 1))->NumberValue());
CHECK_EQ(6.6, a3->Get(v8::Integer::New(isolate, 2))->NumberValue());
- v8::Handle<Value> args4[] = { v8_num(7.7),
- v8_num(8.8),
- v8_num(9.9),
- v8_num(10.11) };
+ v8::Handle<Value> args4[] = {v8_num(7.7), v8_num(8.8), v8_num(9.9),
+ v8_num(10.11)};
Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->Call(Foo, 4, args4));
- CHECK_EQ(4, a4->Length());
+ CHECK_EQ(4u, a4->Length());
CHECK_EQ(7.7, a4->Get(v8::Integer::New(isolate, 0))->NumberValue());
CHECK_EQ(8.8, a4->Get(v8::Integer::New(isolate, 1))->NumberValue());
CHECK_EQ(9.9, a4->Get(v8::Integer::New(isolate, 2))->NumberValue());
@@ -5031,47 +4298,42 @@ THREADED_TEST(ConstructCall) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
CompileRun(
- "function Foo() {"
- " var result = [];"
- " for (var i = 0; i < arguments.length; i++) {"
- " result.push(arguments[i]);"
- " }"
- " return result;"
- "}");
+ "function Foo() {"
+ " var result = [];"
+ " for (var i = 0; i < arguments.length; i++) {"
+ " result.push(arguments[i]);"
+ " }"
+ " return result;"
+ "}");
Local<Function> Foo =
Local<Function>::Cast(context->Global()->Get(v8_str("Foo")));
v8::Handle<Value>* args0 = NULL;
Local<v8::Array> a0 = Local<v8::Array>::Cast(Foo->NewInstance(0, args0));
- CHECK_EQ(0, a0->Length());
+ CHECK_EQ(0u, a0->Length());
- v8::Handle<Value> args1[] = { v8_num(1.1) };
+ v8::Handle<Value> args1[] = {v8_num(1.1)};
Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->NewInstance(1, args1));
- CHECK_EQ(1, a1->Length());
+ CHECK_EQ(1u, a1->Length());
CHECK_EQ(1.1, a1->Get(v8::Integer::New(isolate, 0))->NumberValue());
- v8::Handle<Value> args2[] = { v8_num(2.2),
- v8_num(3.3) };
+ v8::Handle<Value> args2[] = {v8_num(2.2), v8_num(3.3)};
Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->NewInstance(2, args2));
- CHECK_EQ(2, a2->Length());
+ CHECK_EQ(2u, a2->Length());
CHECK_EQ(2.2, a2->Get(v8::Integer::New(isolate, 0))->NumberValue());
CHECK_EQ(3.3, a2->Get(v8::Integer::New(isolate, 1))->NumberValue());
- v8::Handle<Value> args3[] = { v8_num(4.4),
- v8_num(5.5),
- v8_num(6.6) };
+ v8::Handle<Value> args3[] = {v8_num(4.4), v8_num(5.5), v8_num(6.6)};
Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->NewInstance(3, args3));
- CHECK_EQ(3, a3->Length());
+ CHECK_EQ(3u, a3->Length());
CHECK_EQ(4.4, a3->Get(v8::Integer::New(isolate, 0))->NumberValue());
CHECK_EQ(5.5, a3->Get(v8::Integer::New(isolate, 1))->NumberValue());
CHECK_EQ(6.6, a3->Get(v8::Integer::New(isolate, 2))->NumberValue());
- v8::Handle<Value> args4[] = { v8_num(7.7),
- v8_num(8.8),
- v8_num(9.9),
- v8_num(10.11) };
+ v8::Handle<Value> args4[] = {v8_num(7.7), v8_num(8.8), v8_num(9.9),
+ v8_num(10.11)};
Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->NewInstance(4, args4));
- CHECK_EQ(4, a4->Length());
+ CHECK_EQ(4u, a4->Length());
CHECK_EQ(7.7, a4->Get(v8::Integer::New(isolate, 0))->NumberValue());
CHECK_EQ(8.8, a4->Get(v8::Integer::New(isolate, 1))->NumberValue());
CHECK_EQ(9.9, a4->Get(v8::Integer::New(isolate, 2))->NumberValue());
@@ -5082,7 +4344,7 @@ THREADED_TEST(ConstructCall) {
static void CheckUncle(v8::TryCatch* try_catch) {
CHECK(try_catch->HasCaught());
String::Utf8Value str_value(try_catch->Exception());
- CHECK_EQ(*str_value, "uncle?");
+ CHECK_EQ(0, strcmp(*str_value, "uncle?"));
try_catch->Reset();
}
@@ -5193,9 +4455,9 @@ THREADED_TEST(ConversionException) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
CompileRun(
- "function TestClass() { };"
- "TestClass.prototype.toString = function () { throw 'uncle?'; };"
- "var obj = new TestClass();");
+ "function TestClass() { };"
+ "TestClass.prototype.toString = function () { throw 'uncle?'; };"
+ "var obj = new TestClass();");
Local<Value> obj = env->Global()->Get(v8_str("obj"));
v8::TryCatch try_catch(isolate);
@@ -5230,15 +4492,15 @@ THREADED_TEST(ConversionException) {
CheckUncle(&try_catch);
uint32_t uint32_value = obj->Uint32Value();
- CHECK_EQ(0, uint32_value);
+ CHECK_EQ(0u, uint32_value);
CheckUncle(&try_catch);
double number_value = obj->NumberValue();
- CHECK_NE(0, std::isnan(number_value));
+ CHECK(std::isnan(number_value));
CheckUncle(&try_catch);
int64_t integer_value = obj->IntegerValue();
- CHECK_EQ(0.0, static_cast<double>(integer_value));
+ CHECK_EQ(0, integer_value);
CheckUncle(&try_catch);
}
@@ -5270,12 +4532,12 @@ THREADED_TEST(APICatch) {
v8::FunctionTemplate::New(isolate, ThrowFromC));
LocalContext context(0, templ);
CompileRun(
- "var thrown = false;"
- "try {"
- " ThrowFromC();"
- "} catch (e) {"
- " thrown = true;"
- "}");
+ "var thrown = false;"
+ "try {"
+ " ThrowFromC();"
+ "} catch (e) {"
+ " thrown = true;"
+ "}");
Local<Value> thrown = context->Global()->Get(v8_str("thrown"));
CHECK(thrown->BooleanValue());
}
@@ -5305,23 +4567,22 @@ TEST(TryCatchInTryFinally) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("CCatcher"),
- v8::FunctionTemplate::New(isolate, CCatcher));
+ templ->Set(v8_str("CCatcher"), v8::FunctionTemplate::New(isolate, CCatcher));
LocalContext context(0, templ);
- Local<Value> result = CompileRun("try {"
- " try {"
- " CCatcher('throw 7;');"
- " } finally {"
- " }"
- "} catch (e) {"
- "}");
+ Local<Value> result = CompileRun(
+ "try {"
+ " try {"
+ " CCatcher('throw 7;');"
+ " } finally {"
+ " }"
+ "} catch (e) {"
+ "}");
CHECK(result->IsTrue());
}
-static void check_reference_error_message(
- v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void check_reference_error_message(v8::Handle<v8::Message> message,
+ v8::Handle<v8::Value> data) {
const char* reference_error = "Uncaught ReferenceError: asdf is not defined";
CHECK(message->Get()->Equals(v8_str(reference_error)));
}
@@ -5344,17 +4605,19 @@ TEST(APIThrowMessageOverwrittenToString) {
templ->Set(v8_str("fail"), v8::FunctionTemplate::New(isolate, Fail));
LocalContext context(NULL, templ);
CompileRun("asdf;");
- CompileRun("var limit = {};"
- "limit.valueOf = fail;"
- "Error.stackTraceLimit = limit;");
+ CompileRun(
+ "var limit = {};"
+ "limit.valueOf = fail;"
+ "Error.stackTraceLimit = limit;");
CompileRun("asdf");
CompileRun("Array.prototype.pop = fail;");
CompileRun("Object.prototype.hasOwnProperty = fail;");
CompileRun("Object.prototype.toString = function f() { return 'Yikes'; }");
CompileRun("Number.prototype.toString = function f() { return 'Yikes'; }");
CompileRun("String.prototype.toString = function f() { return 'Yikes'; }");
- CompileRun("ReferenceError.prototype.toString ="
- " function() { return 'Whoops' }");
+ CompileRun(
+ "ReferenceError.prototype.toString ="
+ " function() { return 'Whoops' }");
CompileRun("asdf;");
CompileRun("ReferenceError.prototype.constructor.name = void 0;");
CompileRun("asdf;");
@@ -5366,18 +4629,18 @@ TEST(APIThrowMessageOverwrittenToString) {
CompileRun("asdf;");
v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
CHECK(string->Equals(v8_str("Whoops")));
- CompileRun("ReferenceError.prototype.constructor = new Object();"
- "ReferenceError.prototype.constructor.name = 1;"
- "Number.prototype.toString = function() { return 'Whoops'; };"
- "ReferenceError.prototype.toString = Object.prototype.toString;");
+ CompileRun(
+ "ReferenceError.prototype.constructor = new Object();"
+ "ReferenceError.prototype.constructor.name = 1;"
+ "Number.prototype.toString = function() { return 'Whoops'; };"
+ "ReferenceError.prototype.toString = Object.prototype.toString;");
CompileRun("asdf;");
v8::V8::RemoveMessageListeners(check_reference_error_message);
}
-static void check_custom_error_tostring(
- v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void check_custom_error_tostring(v8::Handle<v8::Message> message,
+ v8::Handle<v8::Value> data) {
const char* uncaught_error = "Uncaught MyError toString";
CHECK(message->Get()->Equals(v8_str(uncaught_error)));
}
@@ -5388,22 +4651,21 @@ TEST(CustomErrorToString) {
v8::HandleScope scope(context->GetIsolate());
v8::V8::AddMessageListener(check_custom_error_tostring);
CompileRun(
- "function MyError(name, message) { "
- " this.name = name; "
- " this.message = message; "
- "} "
- "MyError.prototype = Object.create(Error.prototype); "
- "MyError.prototype.toString = function() { "
- " return 'MyError toString'; "
- "}; "
- "throw new MyError('my name', 'my message'); ");
+ "function MyError(name, message) { "
+ " this.name = name; "
+ " this.message = message; "
+ "} "
+ "MyError.prototype = Object.create(Error.prototype); "
+ "MyError.prototype.toString = function() { "
+ " return 'MyError toString'; "
+ "}; "
+ "throw new MyError('my name', 'my message'); ");
v8::V8::RemoveMessageListeners(check_custom_error_tostring);
}
-static void check_custom_error_message(
- v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void check_custom_error_message(v8::Handle<v8::Message> message,
+ v8::Handle<v8::Value> data) {
const char* uncaught_error = "Uncaught MyError: my message";
printf("%s\n", *v8::String::Utf8Value(message->Get()));
CHECK(message->Get()->Equals(v8_str(uncaught_error)));
@@ -5417,37 +4679,37 @@ TEST(CustomErrorMessage) {
// Handlebars.
CompileRun(
- "function MyError(msg) { "
- " this.name = 'MyError'; "
- " this.message = msg; "
- "} "
- "MyError.prototype = new Error(); "
- "throw new MyError('my message'); ");
+ "function MyError(msg) { "
+ " this.name = 'MyError'; "
+ " this.message = msg; "
+ "} "
+ "MyError.prototype = new Error(); "
+ "throw new MyError('my message'); ");
// Closure.
CompileRun(
- "function MyError(msg) { "
- " this.name = 'MyError'; "
- " this.message = msg; "
- "} "
- "inherits = function(childCtor, parentCtor) { "
- " function tempCtor() {}; "
- " tempCtor.prototype = parentCtor.prototype; "
- " childCtor.superClass_ = parentCtor.prototype; "
- " childCtor.prototype = new tempCtor(); "
- " childCtor.prototype.constructor = childCtor; "
- "}; "
- "inherits(MyError, Error); "
- "throw new MyError('my message'); ");
+ "function MyError(msg) { "
+ " this.name = 'MyError'; "
+ " this.message = msg; "
+ "} "
+ "inherits = function(childCtor, parentCtor) { "
+ " function tempCtor() {}; "
+ " tempCtor.prototype = parentCtor.prototype; "
+ " childCtor.superClass_ = parentCtor.prototype; "
+ " childCtor.prototype = new tempCtor(); "
+ " childCtor.prototype.constructor = childCtor; "
+ "}; "
+ "inherits(MyError, Error); "
+ "throw new MyError('my message'); ");
// Object.create.
CompileRun(
- "function MyError(msg) { "
- " this.name = 'MyError'; "
- " this.message = msg; "
- "} "
- "MyError.prototype = Object.create(Error.prototype); "
- "throw new MyError('my message'); ");
+ "function MyError(msg) { "
+ " this.name = 'MyError'; "
+ " this.message = msg; "
+ "} "
+ "MyError.prototype = Object.create(Error.prototype); "
+ "throw new MyError('my message'); ");
v8::V8::RemoveMessageListeners(check_custom_error_message);
}
@@ -5522,11 +4784,10 @@ THREADED_TEST(ExternalScriptException) {
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ("konto", *exception_value);
+ CHECK_EQ(0, strcmp("konto", *exception_value));
}
-
void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CHECK_EQ(4, args.Length());
@@ -5536,13 +4797,9 @@ void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetIsolate()->ThrowException(v8_str("FromC"));
return;
} else {
- Local<v8::Object> global =
- args.GetIsolate()->GetCurrentContext()->Global();
+ Local<v8::Object> global = args.GetIsolate()->GetCurrentContext()->Global();
Local<Value> fun = global->Get(v8_str("JSThrowCountDown"));
- v8::Handle<Value> argv[] = { v8_num(count - 1),
- args[1],
- args[2],
- args[3] };
+ v8::Handle<Value> argv[] = {v8_num(count - 1), args[1], args[2], args[3]};
if (count % cInterval == 0) {
v8::TryCatch try_catch;
Local<Value> result = fun.As<Function>()->Call(global, 4, argv);
@@ -5582,13 +4839,14 @@ THREADED_TEST(EvalInTryFinally) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
- CompileRun("(function() {"
- " try {"
- " eval('asldkf (*&^&*^');"
- " } finally {"
- " return;"
- " }"
- "})()");
+ CompileRun(
+ "(function() {"
+ " try {"
+ " eval('asldkf (*&^&*^');"
+ " } finally {"
+ " return;"
+ " }"
+ "})()");
CHECK(!try_catch.HasCaught());
}
@@ -5622,23 +4880,23 @@ TEST(ExceptionOrder) {
v8::FunctionTemplate::New(isolate, CThrowCountDown));
LocalContext context(0, templ);
CompileRun(
- "function JSThrowCountDown(count, jsInterval, cInterval, expected) {"
- " if (count == 0) throw 'FromJS';"
- " if (count % jsInterval == 0) {"
- " try {"
- " var value = CThrowCountDown(count - 1,"
- " jsInterval,"
- " cInterval,"
- " expected);"
- " check(false, count, expected);"
- " return value;"
- " } catch (e) {"
- " check(true, count, expected);"
- " }"
- " } else {"
- " return CThrowCountDown(count - 1, jsInterval, cInterval, expected);"
- " }"
- "}");
+ "function JSThrowCountDown(count, jsInterval, cInterval, expected) {"
+ " if (count == 0) throw 'FromJS';"
+ " if (count % jsInterval == 0) {"
+ " try {"
+ " var value = CThrowCountDown(count - 1,"
+ " jsInterval,"
+ " cInterval,"
+ " expected);"
+ " check(false, count, expected);"
+ " return value;"
+ " } catch (e) {"
+ " check(true, count, expected);"
+ " }"
+ " } else {"
+ " return CThrowCountDown(count - 1, jsInterval, cInterval, expected);"
+ " }"
+ "}");
Local<Function> fun =
Local<Function>::Cast(context->Global()->Get(v8_str("JSThrowCountDown")));
@@ -5646,27 +4904,27 @@ TEST(ExceptionOrder) {
// count jsInterval cInterval expected
// *JS[4] *C[3] @JS[2] C[1] JS[0]
- v8::Handle<Value> a0[argc] = { v8_num(4), v8_num(2), v8_num(3), v8_num(2) };
+ v8::Handle<Value> a0[argc] = {v8_num(4), v8_num(2), v8_num(3), v8_num(2)};
fun->Call(fun, argc, a0);
// JS[5] *C[4] JS[3] @C[2] JS[1] C[0]
- v8::Handle<Value> a1[argc] = { v8_num(5), v8_num(6), v8_num(1), v8_num(2) };
+ v8::Handle<Value> a1[argc] = {v8_num(5), v8_num(6), v8_num(1), v8_num(2)};
fun->Call(fun, argc, a1);
// JS[6] @C[5] JS[4] C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a2[argc] = { v8_num(6), v8_num(7), v8_num(5), v8_num(5) };
+ v8::Handle<Value> a2[argc] = {v8_num(6), v8_num(7), v8_num(5), v8_num(5)};
fun->Call(fun, argc, a2);
// @JS[6] C[5] JS[4] C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a3[argc] = { v8_num(6), v8_num(6), v8_num(7), v8_num(6) };
+ v8::Handle<Value> a3[argc] = {v8_num(6), v8_num(6), v8_num(7), v8_num(6)};
fun->Call(fun, argc, a3);
// JS[6] *C[5] @JS[4] C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a4[argc] = { v8_num(6), v8_num(4), v8_num(5), v8_num(4) };
+ v8::Handle<Value> a4[argc] = {v8_num(6), v8_num(4), v8_num(5), v8_num(4)};
fun->Call(fun, argc, a4);
// JS[6] C[5] *JS[4] @C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a5[argc] = { v8_num(6), v8_num(4), v8_num(3), v8_num(3) };
+ v8::Handle<Value> a5[argc] = {v8_num(6), v8_num(4), v8_num(3), v8_num(3)};
fun->Call(fun, argc, a5);
}
@@ -5685,16 +4943,16 @@ THREADED_TEST(ThrowValues) {
templ->Set(v8_str("Throw"), v8::FunctionTemplate::New(isolate, ThrowValue));
LocalContext context(0, templ);
v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
- "function Run(obj) {"
- " try {"
- " Throw(obj);"
- " } catch (e) {"
- " return e;"
- " }"
- " return 'no exception';"
- "}"
- "[Run('str'), Run(1), Run(0), Run(null), Run(void 0)];"));
- CHECK_EQ(5, result->Length());
+ "function Run(obj) {"
+ " try {"
+ " Throw(obj);"
+ " } catch (e) {"
+ " return e;"
+ " }"
+ " return 'no exception';"
+ "}"
+ "[Run('str'), Run(1), Run(0), Run(null), Run(void 0)];"));
+ CHECK_EQ(5u, result->Length());
CHECK(result->Get(v8::Integer::New(isolate, 0))->IsString());
CHECK(result->Get(v8::Integer::New(isolate, 1))->IsNumber());
CHECK_EQ(1, result->Get(v8::Integer::New(isolate, 1))->Int32Value());
@@ -5820,8 +5078,8 @@ void TryCatchMixedNestingCheck(v8::TryCatch* try_catch) {
Handle<Message> message = try_catch->Message();
Handle<Value> resource = message->GetScriptOrigin().ResourceName();
CHECK_EQ(0, strcmp(*v8::String::Utf8Value(resource), "inner"));
- CHECK_EQ(0, strcmp(*v8::String::Utf8Value(message->Get()),
- "Uncaught Error: a"));
+ CHECK_EQ(0,
+ strcmp(*v8::String::Utf8Value(message->Get()), "Uncaught Error: a"));
CHECK_EQ(1, message->GetLineNumber());
CHECK_EQ(6, message->GetStartColumn());
}
@@ -5912,11 +5170,11 @@ THREADED_TEST(Equality) {
CHECK(v8_str("a")->Equals(v8_str("a")));
CHECK(!v8_str("a")->Equals(v8_str("b")));
- CHECK_EQ(v8_str("a"), v8_str("a"));
- CHECK_NE(v8_str("a"), v8_str("b"));
- CHECK_EQ(v8_num(1), v8_num(1));
- CHECK_EQ(v8_num(1.00), v8_num(1));
- CHECK_NE(v8_num(1), v8_num(2));
+ CHECK(v8_str("a")->Equals(v8_str("a")));
+ CHECK(!v8_str("a")->Equals(v8_str("b")));
+ CHECK(v8_num(1)->Equals(v8_num(1)));
+ CHECK(v8_num(1.00)->Equals(v8_num(1)));
+ CHECK(!v8_num(1)->Equals(v8_num(2)));
// Assume String is not internalized.
CHECK(v8_str("a")->StrictEquals(v8_str("a")));
@@ -5925,7 +5183,7 @@ THREADED_TEST(Equality) {
CHECK(v8_num(1)->StrictEquals(v8_num(1)));
CHECK(!v8_num(1)->StrictEquals(v8_num(2)));
CHECK(v8_num(0.0)->StrictEquals(v8_num(-0.0)));
- Local<Value> not_a_number = v8_num(v8::base::OS::nan_value());
+ Local<Value> not_a_number = v8_num(std::numeric_limits<double>::quiet_NaN());
CHECK(!not_a_number->StrictEquals(not_a_number));
CHECK(v8::False(isolate)->StrictEquals(v8::False(isolate)));
CHECK(!v8::False(isolate)->StrictEquals(v8::Undefined(isolate)));
@@ -5951,16 +5209,15 @@ THREADED_TEST(MultiRun) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Local<Script> script = v8_compile("x");
- for (int i = 0; i < 10; i++)
- script->Run();
+ for (int i = 0; i < 10; i++) script->Run();
}
static void GetXValue(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK_EQ(info.Data(), v8_str("donut"));
- CHECK_EQ(name, v8_str("x"));
+ CHECK(info.Data()->Equals(v8_str("donut")));
+ CHECK(name->Equals(v8_str("x")));
info.GetReturnValue().Set(name);
}
@@ -5975,7 +5232,7 @@ THREADED_TEST(SimplePropertyRead) {
Local<Script> script = v8_compile("obj.x");
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
- CHECK_EQ(result, v8_str("x"));
+ CHECK(result->Equals(v8_str("x")));
}
}
@@ -6003,7 +5260,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
result = script_define->Run();
- CHECK_EQ(result, v8_num(42));
+ CHECK(result->Equals(v8_num(42)));
// Check that the accessor is still configurable
result = script_desc->Run();
@@ -6016,7 +5273,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
result = script_define->Run();
- CHECK_EQ(result, v8_num(43));
+ CHECK(result->Equals(v8_num(43)));
result = script_desc->Run();
CHECK_EQ(result->BooleanValue(), false);
@@ -6025,7 +5282,8 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
result = script_define->Run();
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
+ CHECK_EQ(0,
+ strcmp(*exception_value, "TypeError: Cannot redefine property: x"));
}
@@ -6051,7 +5309,7 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
result = script_define->Run();
- CHECK_EQ(result, v8_num(42));
+ CHECK(result->Equals(v8_num(42)));
result = script_desc->Run();
@@ -6064,7 +5322,7 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
result = script_define->Run();
- CHECK_EQ(result, v8_num(43));
+ CHECK(result->Equals(v8_num(43)));
result = script_desc->Run();
CHECK_EQ(result->BooleanValue(), false);
@@ -6073,7 +5331,8 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
result = script_define->Run();
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
+ CHECK_EQ(0,
+ strcmp(*exception_value, "TypeError: Cannot redefine property: x"));
}
@@ -6095,14 +5354,14 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
CHECK(CompileRun("obj1.x")->IsUndefined());
CHECK(CompileRun("obj2.x")->IsUndefined());
- CHECK(GetGlobalProperty(&context, "obj1")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "x");
CHECK(CompileRun("obj2.x")->IsUndefined());
- CHECK(GetGlobalProperty(&context, "obj2")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
@@ -6110,14 +5369,16 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
- CompileRun("Object.defineProperty(obj1, 'x',"
- "{ get: function() { return 'y'; }, configurable: true })");
+ CompileRun(
+ "Object.defineProperty(obj1, 'x',"
+ "{ get: function() { return 'y'; }, configurable: true })");
ExpectString("obj1.x", "y");
ExpectString("obj2.x", "x");
- CompileRun("Object.defineProperty(obj2, 'x',"
- "{ get: function() { return 'y'; }, configurable: true })");
+ CompileRun(
+ "Object.defineProperty(obj2, 'x',"
+ "{ get: function() { return 'y'; }, configurable: true })");
ExpectString("obj1.x", "y");
ExpectString("obj2.x", "y");
@@ -6125,10 +5386,10 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
- CHECK(GetGlobalProperty(&context, "obj1")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
- CHECK(GetGlobalProperty(&context, "obj2")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
@@ -6137,10 +5398,12 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
// Define getters/setters, but now make them not configurable.
- CompileRun("Object.defineProperty(obj1, 'x',"
- "{ get: function() { return 'z'; }, configurable: false })");
- CompileRun("Object.defineProperty(obj2, 'x',"
- "{ get: function() { return 'z'; }, configurable: false })");
+ CompileRun(
+ "Object.defineProperty(obj1, 'x',"
+ "{ get: function() { return 'z'; }, configurable: false })");
+ CompileRun(
+ "Object.defineProperty(obj2, 'x',"
+ "{ get: function() { return 'z'; }, configurable: false })");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
@@ -6148,10 +5411,10 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
ExpectString("obj1.x", "z");
ExpectString("obj2.x", "z");
- CHECK(!GetGlobalProperty(&context, "obj1")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
- CHECK(!GetGlobalProperty(&context, "obj2")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(!GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(!GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
ExpectString("obj1.x", "z");
ExpectString("obj2.x", "z");
@@ -6167,14 +5430,12 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
CompileRun("var obj2 = {};");
- CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
- v8_str("x"),
- GetXValue, NULL,
- v8_str("donut"), v8::DEFAULT, v8::DontDelete));
- CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
- v8_str("x"),
- GetXValue, NULL,
- v8_str("donut"), v8::DEFAULT, v8::DontDelete));
+ CHECK(GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"),
+ v8::DEFAULT, v8::DontDelete));
+ CHECK(GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"),
+ v8::DEFAULT, v8::DontDelete));
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
@@ -6182,26 +5443,30 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
- CHECK(!GetGlobalProperty(&context, "obj1")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
- CHECK(!GetGlobalProperty(&context, "obj2")->
- SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(!GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(!GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
{
v8::TryCatch try_catch;
- CompileRun("Object.defineProperty(obj1, 'x',"
+ CompileRun(
+ "Object.defineProperty(obj1, 'x',"
"{get: function() { return 'func'; }})");
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
+ CHECK_EQ(
+ 0, strcmp(*exception_value, "TypeError: Cannot redefine property: x"));
}
{
v8::TryCatch try_catch;
- CompileRun("Object.defineProperty(obj2, 'x',"
+ CompileRun(
+ "Object.defineProperty(obj2, 'x',"
"{get: function() { return 'func'; }})");
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(*exception_value, "TypeError: Cannot redefine property: x");
+ CHECK_EQ(
+ 0, strcmp(*exception_value, "TypeError: Cannot redefine property: x"));
}
}
@@ -6209,8 +5474,8 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
static void Get239Value(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK_EQ(info.Data(), v8_str("donut"));
- CHECK_EQ(name, v8_str("239"));
+ CHECK(info.Data()->Equals(v8_str("donut")));
+ CHECK(name->Equals(v8_str("239")));
info.GetReturnValue().Set(name);
}
@@ -6224,14 +5489,10 @@ THREADED_TEST(ElementAPIAccessor) {
context->Global()->Set(v8_str("obj1"), templ->NewInstance());
CompileRun("var obj2 = {};");
- CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
- v8_str("239"),
- Get239Value, NULL,
- v8_str("donut")));
- CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
- v8_str("239"),
- Get239Value, NULL,
- v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(v8_str("239"), Get239Value, NULL, v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(v8_str("239"), Get239Value, NULL, v8_str("donut")));
ExpectString("obj1[239]", "239");
ExpectString("obj2[239]", "239");
@@ -6243,12 +5504,11 @@ THREADED_TEST(ElementAPIAccessor) {
v8::Persistent<Value> xValue;
-static void SetXValue(Local<String> name,
- Local<Value> value,
+static void SetXValue(Local<String> name, Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- CHECK_EQ(value, v8_num(4));
- CHECK_EQ(info.Data(), v8_str("donut"));
- CHECK_EQ(name, v8_str("x"));
+ CHECK(value->Equals(v8_num(4)));
+ CHECK(info.Data()->Equals(v8_str("donut")));
+ CHECK(name->Equals(v8_str("x")));
CHECK(xValue.IsEmpty());
xValue.Reset(info.GetIsolate(), value);
}
@@ -6265,7 +5525,7 @@ THREADED_TEST(SimplePropertyWrite) {
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
- CHECK_EQ(v8_num(4), Local<Value>::New(CcTest::isolate(), xValue));
+ CHECK(v8_num(4)->Equals(Local<Value>::New(CcTest::isolate(), xValue)));
xValue.Reset();
}
}
@@ -6282,7 +5542,7 @@ THREADED_TEST(SetterOnly) {
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
- CHECK_EQ(v8_num(4), Local<Value>::New(CcTest::isolate(), xValue));
+ CHECK(v8_num(4)->Equals(Local<Value>::New(CcTest::isolate(), xValue)));
xValue.Reset();
}
}
@@ -6292,10 +5552,8 @@ THREADED_TEST(NoAccessors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessor(v8_str("x"),
- static_cast<v8::AccessorGetterCallback>(NULL),
- NULL,
- v8_str("donut"));
+ templ->SetAccessor(v8_str("x"), static_cast<v8::AccessorGetterCallback>(NULL),
+ NULL, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
Local<Script> script = v8_compile("obj.x = 4; obj.x");
@@ -6305,605 +5563,12 @@ THREADED_TEST(NoAccessors) {
}
-static void XPropertyGetter(Local<Name> property,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- CHECK(info.Data()->IsUndefined());
- info.GetReturnValue().Set(property);
-}
-
-
-THREADED_TEST(NamedInterceptorPropertyRead) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
- LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = v8_compile("obj.x");
- for (int i = 0; i < 10; i++) {
- Local<Value> result = script->Run();
- CHECK_EQ(result, v8_str("x"));
- }
-}
-
-
-THREADED_TEST(NamedInterceptorDictionaryIC) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
- LocalContext context;
- // Create an object with a named interceptor.
- context->Global()->Set(v8_str("interceptor_obj"), templ->NewInstance());
- Local<Script> script = v8_compile("interceptor_obj.x");
- for (int i = 0; i < 10; i++) {
- Local<Value> result = script->Run();
- CHECK_EQ(result, v8_str("x"));
- }
- // Create a slow case object and a function accessing a property in
- // that slow case object (with dictionary probing in generated
- // code). Then force object with a named interceptor into slow-case,
- // pass it to the function, and check that the interceptor is called
- // instead of accessing the local property.
- Local<Value> result =
- CompileRun("function get_x(o) { return o.x; };"
- "var obj = { x : 42, y : 0 };"
- "delete obj.y;"
- "for (var i = 0; i < 10; i++) get_x(obj);"
- "interceptor_obj.x = 42;"
- "interceptor_obj.y = 10;"
- "delete interceptor_obj.y;"
- "get_x(interceptor_obj)");
- CHECK_EQ(result, v8_str("x"));
-}
-
-
-THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> context1 = Context::New(isolate);
-
- context1->Enter();
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
- // Create an object with a named interceptor.
- v8::Local<v8::Object> object = templ->NewInstance();
- context1->Global()->Set(v8_str("interceptor_obj"), object);
-
- // Force the object into the slow case.
- CompileRun("interceptor_obj.y = 0;"
- "delete interceptor_obj.y;");
- context1->Exit();
-
- {
- // Introduce the object into a different context.
- // Repeat named loads to exercise ICs.
- LocalContext context2;
- context2->Global()->Set(v8_str("interceptor_obj"), object);
- Local<Value> result =
- CompileRun("function get_x(o) { return o.x; }"
- "interceptor_obj.x = 42;"
- "for (var i=0; i != 10; i++) {"
- " get_x(interceptor_obj);"
- "}"
- "get_x(interceptor_obj)");
- // Check that the interceptor was actually invoked.
- CHECK_EQ(result, v8_str("x"));
- }
-
- // Return to the original context and force some object to the slow case
- // to cause the NormalizedMapCache to verify.
- context1->Enter();
- CompileRun("var obj = { x : 0 }; delete obj.x;");
- context1->Exit();
-}
-
-
-static void SetXOnPrototypeGetter(
- Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
- // Set x on the prototype object and do not handle the get request.
- v8::Handle<v8::Value> proto = info.Holder()->GetPrototype();
- proto.As<v8::Object>()->Set(v8_str("x"),
- v8::Integer::New(info.GetIsolate(), 23));
-}
-
-
-// This is a regression test for http://crbug.com/20104. Map
-// transitions should not interfere with post interceptor lookup.
-THREADED_TEST(NamedInterceptorMapTransitionRead) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> function_template =
- v8::FunctionTemplate::New(isolate);
- Local<v8::ObjectTemplate> instance_template
- = function_template->InstanceTemplate();
- instance_template->SetHandler(
- v8::NamedPropertyHandlerConfiguration(SetXOnPrototypeGetter));
- LocalContext context;
- context->Global()->Set(v8_str("F"), function_template->GetFunction());
- // Create an instance of F and introduce a map transition for x.
- CompileRun("var o = new F(); o.x = 23;");
- // Create an instance of F and invoke the getter. The result should be 23.
- Local<Value> result = CompileRun("o = new F(); o.x");
- CHECK_EQ(result->Int32Value(), 23);
-}
-
-
-static void IndexedPropertyGetter(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (index == 37) {
- info.GetReturnValue().Set(v8_num(625));
- }
-}
-
-
-static void IndexedPropertySetter(
- uint32_t index,
- Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (index == 39) {
- info.GetReturnValue().Set(value);
- }
-}
-
-
-THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- IndexedPropertyGetter, IndexedPropertySetter));
- LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> getter_script = v8_compile(
- "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];");
- Local<Script> setter_script = v8_compile(
- "obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
- "obj[17] = 23;"
- "obj.foo;");
- Local<Script> interceptor_setter_script = v8_compile(
- "obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
- "obj[39] = 47;"
- "obj.foo;"); // This setter should not run, due to the interceptor.
- Local<Script> interceptor_getter_script = v8_compile(
- "obj[37];");
- Local<Value> result = getter_script->Run();
- CHECK_EQ(v8_num(5), result);
- result = setter_script->Run();
- CHECK_EQ(v8_num(23), result);
- result = interceptor_setter_script->Run();
- CHECK_EQ(v8_num(23), result);
- result = interceptor_getter_script->Run();
- CHECK_EQ(v8_num(625), result);
-}
-
-
-static void UnboxedDoubleIndexedPropertyGetter(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (index < 25) {
- info.GetReturnValue().Set(v8_num(index));
- }
-}
-
-
-static void UnboxedDoubleIndexedPropertySetter(
- uint32_t index,
- Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (index < 25) {
- info.GetReturnValue().Set(v8_num(index));
- }
-}
-
-
-void UnboxedDoubleIndexedPropertyEnumerator(
- const v8::PropertyCallbackInfo<v8::Array>& info) {
- // Force the list of returned keys to be stored in a FastDoubleArray.
- Local<Script> indexed_property_names_script = v8_compile(
- "keys = new Array(); keys[125000] = 1;"
- "for(i = 0; i < 80000; i++) { keys[i] = i; };"
- "keys.length = 25; keys;");
- Local<Value> result = indexed_property_names_script->Run();
- info.GetReturnValue().Set(Local<v8::Array>::Cast(result));
-}
-
-
-// Make sure that the the interceptor code in the runtime properly handles
-// merging property name lists for double-array-backed arrays.
-THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- UnboxedDoubleIndexedPropertyGetter, UnboxedDoubleIndexedPropertySetter, 0,
- 0, UnboxedDoubleIndexedPropertyEnumerator));
- LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
- // When obj is created, force it to be Stored in a FastDoubleArray.
- Local<Script> create_unboxed_double_script = v8_compile(
- "obj[125000] = 1; for(i = 0; i < 80000; i+=2) { obj[i] = i; } "
- "key_count = 0; "
- "for (x in obj) {key_count++;};"
- "obj;");
- Local<Value> result = create_unboxed_double_script->Run();
- CHECK(result->ToObject(isolate)->HasRealIndexedProperty(2000));
- Local<Script> key_count_check = v8_compile("key_count;");
- result = key_count_check->Run();
- CHECK_EQ(v8_num(40013), result);
-}
-
-
-void SloppyArgsIndexedPropertyEnumerator(
- const v8::PropertyCallbackInfo<v8::Array>& info) {
- // Force the list of returned keys to be stored in a Arguments object.
- Local<Script> indexed_property_names_script = v8_compile(
- "function f(w,x) {"
- " return arguments;"
- "}"
- "keys = f(0, 1, 2, 3);"
- "keys;");
- Local<Object> result =
- Local<Object>::Cast(indexed_property_names_script->Run());
- // Have to populate the handle manually, as it's not Cast-able.
- i::Handle<i::JSObject> o =
- v8::Utils::OpenHandle<Object, i::JSObject>(result);
- i::Handle<i::JSArray> array(reinterpret_cast<i::JSArray*>(*o));
- info.GetReturnValue().Set(v8::Utils::ToLocal(array));
-}
-
-
-static void SloppyIndexedPropertyGetter(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (index < 4) {
- info.GetReturnValue().Set(v8_num(index));
- }
-}
-
-
-// Make sure that the the interceptor code in the runtime properly handles
-// merging property name lists for non-string arguments arrays.
-THREADED_TEST(IndexedInterceptorSloppyArgsWithIndexedAccessor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- SloppyIndexedPropertyGetter, 0, 0, 0,
- SloppyArgsIndexedPropertyEnumerator));
- LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> create_args_script = v8_compile(
- "var key_count = 0;"
- "for (x in obj) {key_count++;} key_count;");
- Local<Value> result = create_args_script->Run();
- CHECK_EQ(v8_num(4), result);
-}
-
-
-static void IdentityIndexedPropertyGetter(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(index);
-}
-
-
-THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
-
- // Check fast object case.
- const char* fast_case_code =
- "Object.getOwnPropertyDescriptor(obj, 0).value.toString()";
- ExpectString(fast_case_code, "0");
-
- // Check slow case.
- const char* slow_case_code =
- "obj.x = 1; delete obj.x;"
- "Object.getOwnPropertyDescriptor(obj, 1).value.toString()";
- ExpectString(slow_case_code, "1");
-}
-
-
-THREADED_TEST(IndexedInterceptorWithNoSetter) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
-
- const char* code =
- "try {"
- " obj[0] = 239;"
- " for (var i = 0; i < 100; i++) {"
- " var v = obj[0];"
- " if (v != 0) throw 'Wrong value ' + v + ' at iteration ' + i;"
- " }"
- " 'PASSED'"
- "} catch(e) {"
- " e"
- "}";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- obj->TurnOnAccessCheck();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "var result = 'PASSED';"
- "for (var i = 0; i < 100; i++) {"
- " try {"
- " var v = obj[0];"
- " result = 'Wrong value ' + v + ' at iteration ' + i;"
- " break;"
- " } catch (e) {"
- " /* pass */"
- " }"
- "}"
- "result";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "var result = 'PASSED';"
- "for (var i = 0; i < 100; i++) {"
- " var expected = i;"
- " if (i == 5) {"
- " %EnableAccessChecks(obj);"
- " }"
- " try {"
- " var v = obj[i];"
- " if (i == 5) {"
- " result = 'Should not have reached this!';"
- " break;"
- " } else if (v != expected) {"
- " result = 'Wrong value ' + v + ' at iteration ' + i;"
- " break;"
- " }"
- " } catch (e) {"
- " if (i != 5) {"
- " result = e;"
- " }"
- " }"
- " if (i == 5) %DisableAccessChecks(obj);"
- "}"
- "result";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "try {"
- " for (var i = 0; i < 100; i++) {"
- " var v = obj[i];"
- " if (v != i) throw 'Wrong value ' + v + ' at iteration ' + i;"
- " }"
- " 'PASSED'"
- "} catch(e) {"
- " e"
- "}";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "try {"
- " for (var i = 0; i < 100; i++) {"
- " var expected = i;"
- " var key = i;"
- " if (i == 25) {"
- " key = -1;"
- " expected = undefined;"
- " }"
- " if (i == 50) {"
- " /* probe minimal Smi number on 32-bit platforms */"
- " key = -(1 << 30);"
- " expected = undefined;"
- " }"
- " if (i == 75) {"
- " /* probe minimal Smi number on 64-bit platforms */"
- " key = 1 << 31;"
- " expected = undefined;"
- " }"
- " var v = obj[key];"
- " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
- " }"
- " 'PASSED'"
- "} catch(e) {"
- " e"
- "}";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "try {"
- " for (var i = 0; i < 100; i++) {"
- " var expected = i;"
- " var key = i;"
- " if (i == 50) {"
- " key = 'foobar';"
- " expected = undefined;"
- " }"
- " var v = obj[key];"
- " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
- " }"
- " 'PASSED'"
- "} catch(e) {"
- " e"
- "}";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "var original = obj;"
- "try {"
- " for (var i = 0; i < 100; i++) {"
- " var expected = i;"
- " if (i == 50) {"
- " obj = {50: 'foobar'};"
- " expected = 'foobar';"
- " }"
- " var v = obj[i];"
- " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
- " if (i == 50) obj = original;"
- " }"
- " 'PASSED'"
- "} catch(e) {"
- " e"
- "}";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "var original = obj;"
- "try {"
- " for (var i = 0; i < 100; i++) {"
- " var expected = i;"
- " if (i == 5) {"
- " obj = 239;"
- " expected = undefined;"
- " }"
- " var v = obj[i];"
- " if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
- " if (i == 5) obj = original;"
- " }"
- " 'PASSED'"
- "} catch(e) {"
- " e"
- "}";
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(IndexedInterceptorOnProto) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
-
- LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
-
- const char* code =
- "var o = {__proto__: obj};"
- "try {"
- " for (var i = 0; i < 100; i++) {"
- " var v = o[i];"
- " if (v != i) throw 'Wrong value ' + v + ' at iteration ' + i;"
- " }"
- " 'PASSED'"
- "} catch(e) {"
- " e"
- "}";
- ExpectString(code, "PASSED");
-}
-
-
THREADED_TEST(MultiContexts) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("dummy"), v8::FunctionTemplate::New(isolate,
- DummyCallHandler));
+ templ->Set(v8_str("dummy"),
+ v8::FunctionTemplate::New(isolate, DummyCallHandler));
Local<String> password = v8_str("Password");
@@ -6919,7 +5584,7 @@ THREADED_TEST(MultiContexts) {
context1->SetSecurityToken(password);
v8::Handle<v8::Object> global1 = context1->Global();
global1->Set(v8_str("custom"), v8_num(1234));
- CHECK_NE(global0, global1);
+ CHECK(!global0->Equals(global1));
CHECK_EQ(1234, global0->Get(v8_str("custom"))->Int32Value());
CHECK_EQ(1234, global1->Get(v8_str("custom"))->Int32Value());
@@ -6927,7 +5592,7 @@ THREADED_TEST(MultiContexts) {
LocalContext context2(0, templ, global1);
context2->SetSecurityToken(password);
v8::Handle<v8::Object> global2 = context2->Global();
- CHECK_EQ(global1, global2);
+ CHECK(global1->Equals(global2));
CHECK_EQ(0, global1->Get(v8_str("custom"))->Int32Value());
CHECK_EQ(0, global2->Get(v8_str("custom"))->Int32Value());
}
@@ -6940,8 +5605,7 @@ THREADED_TEST(FunctionPrototypeAcrossContexts) {
v8::HandleScope scope(CcTest::isolate());
LocalContext env0;
- v8::Handle<v8::Object> global0 =
- env0->Global();
+ v8::Handle<v8::Object> global0 = env0->Global();
v8::Handle<v8::Object> object0 =
global0->Get(v8_str("Object")).As<v8::Object>();
v8::Handle<v8::Object> tostring0 =
@@ -6951,8 +5615,7 @@ THREADED_TEST(FunctionPrototypeAcrossContexts) {
proto0->Set(v8_str("custom"), v8_num(1234));
LocalContext env1;
- v8::Handle<v8::Object> global1 =
- env1->Global();
+ v8::Handle<v8::Object> global1 = env1->Global();
v8::Handle<v8::Object> object1 =
global1->Get(v8_str("Object")).As<v8::Object>();
v8::Handle<v8::Object> tostring1 =
@@ -6972,9 +5635,10 @@ THREADED_TEST(Regress892105) {
v8::HandleScope scope(CcTest::isolate());
- Local<String> source = v8_str("Object.prototype.obj = 1234;"
- "Array.prototype.arr = 4567;"
- "8901");
+ Local<String> source = v8_str(
+ "Object.prototype.obj = 1234;"
+ "Array.prototype.arr = 4567;"
+ "8901");
LocalContext env0;
Local<Script> script0 = v8_compile(source);
@@ -7055,22 +5719,24 @@ THREADED_TEST(VoidLiteral) {
ExpectBoolean("void 0 === undetectable", false);
ExpectBoolean("void 0 === null", false);
- ExpectString("(function() {"
- " try {"
- " return x === void 0;"
- " } catch(e) {"
- " return e.toString();"
- " }"
- "})()",
- "ReferenceError: x is not defined");
- ExpectString("(function() {"
- " try {"
- " return void 0 === x;"
- " } catch(e) {"
- " return e.toString();"
- " }"
- "})()",
- "ReferenceError: x is not defined");
+ ExpectString(
+ "(function() {"
+ " try {"
+ " return x === void 0;"
+ " } catch(e) {"
+ " return e.toString();"
+ " }"
+ "})()",
+ "ReferenceError: x is not defined");
+ ExpectString(
+ "(function() {"
+ " try {"
+ " return void 0 === x;"
+ " } catch(e) {"
+ " return e.toString();"
+ " }"
+ "})()",
+ "ReferenceError: x is not defined");
}
@@ -7085,12 +5751,13 @@ THREADED_TEST(ExtensibleOnUndetectable) {
Local<v8::Object> obj = desc->GetFunction()->NewInstance();
env->Global()->Set(v8_str("undetectable"), obj);
- Local<String> source = v8_str("undetectable.x = 42;"
- "undetectable.x");
+ Local<String> source = v8_str(
+ "undetectable.x = 42;"
+ "undetectable.x");
Local<Script> script = v8_compile(source);
- CHECK_EQ(v8::Integer::New(isolate, 42), script->Run());
+ CHECK(v8::Integer::New(isolate, 42)->Equals(script->Run()));
ExpectBoolean("Object.isExtensible(undetectable)", true);
@@ -7106,81 +5773,6 @@ THREADED_TEST(ExtensibleOnUndetectable) {
}
-
-THREADED_TEST(UndetectableString) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
-
- Local<String> obj = String::NewFromUtf8(env->GetIsolate(), "foo",
- String::kUndetectableString);
- env->Global()->Set(v8_str("undetectable"), obj);
-
- ExpectString("undetectable", "foo");
- ExpectString("typeof undetectable", "undefined");
- ExpectString("typeof(undetectable)", "undefined");
- ExpectBoolean("typeof undetectable == 'undefined'", true);
- ExpectBoolean("typeof undetectable == 'string'", false);
- ExpectBoolean("if (undetectable) { true; } else { false; }", false);
- ExpectBoolean("!undetectable", true);
-
- ExpectObject("true&&undetectable", obj);
- ExpectBoolean("false&&undetectable", false);
- ExpectBoolean("true||undetectable", true);
- ExpectObject("false||undetectable", obj);
-
- ExpectObject("undetectable&&true", obj);
- ExpectObject("undetectable&&false", obj);
- ExpectBoolean("undetectable||true", true);
- ExpectBoolean("undetectable||false", false);
-
- ExpectBoolean("undetectable==null", true);
- ExpectBoolean("null==undetectable", true);
- ExpectBoolean("undetectable==undefined", true);
- ExpectBoolean("undefined==undetectable", true);
- ExpectBoolean("undetectable==undetectable", true);
-
-
- ExpectBoolean("undetectable===null", false);
- ExpectBoolean("null===undetectable", false);
- ExpectBoolean("undetectable===undefined", false);
- ExpectBoolean("undefined===undetectable", false);
- ExpectBoolean("undetectable===undetectable", true);
-}
-
-
-TEST(UndetectableOptimized) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
-
- Local<String> obj = String::NewFromUtf8(env->GetIsolate(), "foo",
- String::kUndetectableString);
- env->Global()->Set(v8_str("undetectable"), obj);
- env->Global()->Set(v8_str("detectable"), v8_str("bar"));
-
- ExpectString(
- "function testBranch() {"
- " if (!%_IsUndetectableObject(undetectable)) throw 1;"
- " if (%_IsUndetectableObject(detectable)) throw 2;"
- "}\n"
- "function testBool() {"
- " var b1 = !%_IsUndetectableObject(undetectable);"
- " var b2 = %_IsUndetectableObject(detectable);"
- " if (b1) throw 3;"
- " if (b2) throw 4;"
- " return b1 == b2;"
- "}\n"
- "%OptimizeFunctionOnNextCall(testBranch);"
- "%OptimizeFunctionOnNextCall(testBool);"
- "for (var i = 0; i < 10; i++) {"
- " testBranch();"
- " testBool();"
- "}\n"
- "\"PASS\"",
- "PASS");
-}
-
-
// The point of this test is type checking. We run it only so compilers
// don't complain about an unused function.
TEST(PersistentHandles) {
@@ -7218,45 +5810,44 @@ THREADED_TEST(GlobalObjectTemplate) {
static const char* kSimpleExtensionSource =
- "function Foo() {"
- " return 4;"
- "}";
+ "function Foo() {"
+ " return 4;"
+ "}";
TEST(SimpleExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("simpletest", kSimpleExtensionSource));
- const char* extension_names[] = { "simpletest" };
+ const char* extension_names[] = {"simpletest"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun("Foo()");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 4)));
}
static const char* kStackTraceFromExtensionSource =
- "function foo() {"
- " throw new Error();"
- "}"
- "function bar() {"
- " foo();"
- "}";
+ "function foo() {"
+ " throw new Error();"
+ "}"
+ "function bar() {"
+ " foo();"
+ "}";
TEST(StackTraceInExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("stacktracetest",
- kStackTraceFromExtensionSource));
- const char* extension_names[] = { "stacktracetest" };
+ v8::RegisterExtension(
+ new Extension("stacktracetest", kStackTraceFromExtensionSource));
+ const char* extension_names[] = {"stacktracetest"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- CompileRun("function user() { bar(); }"
- "var error;"
- "try{ user(); } catch (e) { error = e; }");
+ CompileRun(
+ "function user() { bar(); }"
+ "var error;"
+ "try{ user(); } catch (e) { error = e; }");
CHECK_EQ(-1, CompileRun("error.stack.indexOf('foo')")->Int32Value());
CHECK_EQ(-1, CompileRun("error.stack.indexOf('bar')")->Int32Value());
CHECK_NE(-1, CompileRun("error.stack.indexOf('user')")->Int32Value());
@@ -7266,13 +5857,12 @@ TEST(StackTraceInExtension) {
TEST(NullExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("nulltest", NULL));
- const char* extension_names[] = { "nulltest" };
+ const char* extension_names[] = {"nulltest"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun("1+3");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 4)));
}
@@ -7284,13 +5874,12 @@ static const int kEmbeddedExtensionSourceValidLen = 34;
TEST(ExtensionMissingSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("srclentest_fail",
- kEmbeddedExtensionSource));
- const char* extension_names[] = { "srclentest_fail" };
+ v8::RegisterExtension(
+ new Extension("srclentest_fail", kEmbeddedExtensionSource));
+ const char* extension_names[] = {"srclentest_fail"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
- CHECK_EQ(0, *context);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ CHECK(0 == *context);
}
@@ -7300,89 +5889,84 @@ TEST(ExtensionWithSourceLength) {
v8::HandleScope handle_scope(CcTest::isolate());
i::ScopedVector<char> extension_name(32);
i::SNPrintF(extension_name, "ext #%d", source_len);
- v8::RegisterExtension(new Extension(extension_name.start(),
- kEmbeddedExtensionSource, 0, 0,
- source_len));
- const char* extension_names[1] = { extension_name.start() };
+ v8::RegisterExtension(new Extension(
+ extension_name.start(), kEmbeddedExtensionSource, 0, 0, source_len));
+ const char* extension_names[1] = {extension_name.start()};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
if (source_len == kEmbeddedExtensionSourceValidLen) {
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun("Ret54321()");
- CHECK_EQ(v8::Integer::New(CcTest::isolate(), 54321), result);
+ CHECK(v8::Integer::New(CcTest::isolate(), 54321)->Equals(result));
} else {
// Anything but exactly the right length should fail to compile.
- CHECK_EQ(0, *context);
+ CHECK(0 == *context);
}
}
}
static const char* kEvalExtensionSource1 =
- "function UseEval1() {"
- " var x = 42;"
- " return eval('x');"
- "}";
+ "function UseEval1() {"
+ " var x = 42;"
+ " return eval('x');"
+ "}";
static const char* kEvalExtensionSource2 =
- "(function() {"
- " var x = 42;"
- " function e() {"
- " return eval('x');"
- " }"
- " this.UseEval2 = e;"
- "})()";
+ "(function() {"
+ " var x = 42;"
+ " function e() {"
+ " return eval('x');"
+ " }"
+ " this.UseEval2 = e;"
+ "})()";
TEST(UseEvalFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1));
v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
- const char* extension_names[] = { "evaltest1", "evaltest2" };
+ const char* extension_names[] = {"evaltest1", "evaltest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun("UseEval1()");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 42)));
result = CompileRun("UseEval2()");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 42)));
}
static const char* kWithExtensionSource1 =
- "function UseWith1() {"
- " var x = 42;"
- " with({x:87}) { return x; }"
- "}";
-
+ "function UseWith1() {"
+ " var x = 42;"
+ " with({x:87}) { return x; }"
+ "}";
static const char* kWithExtensionSource2 =
- "(function() {"
- " var x = 42;"
- " function e() {"
- " with ({x:87}) { return x; }"
- " }"
- " this.UseWith2 = e;"
- "})()";
+ "(function() {"
+ " var x = 42;"
+ " function e() {"
+ " with ({x:87}) { return x; }"
+ " }"
+ " this.UseWith2 = e;"
+ "})()";
TEST(UseWithFromExtension) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1));
v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
- const char* extension_names[] = { "withtest1", "withtest2" };
+ const char* extension_names[] = {"withtest1", "withtest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun("UseWith1()");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 87)));
result = CompileRun("UseWith2()");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 87)));
}
@@ -7391,46 +5975,41 @@ TEST(AutoExtensions) {
Extension* extension = new Extension("autotest", kSimpleExtensionSource);
extension->set_auto_enable(true);
v8::RegisterExtension(extension);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate());
+ v8::Handle<Context> context = Context::New(CcTest::isolate());
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun("Foo()");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 4)));
}
-static const char* kSyntaxErrorInExtensionSource =
- "[";
+static const char* kSyntaxErrorInExtensionSource = "[";
// Test that a syntax error in an extension does not cause a fatal
// error but results in an empty context.
TEST(SyntaxErrorExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("syntaxerror",
- kSyntaxErrorInExtensionSource));
- const char* extension_names[] = { "syntaxerror" };
+ v8::RegisterExtension(
+ new Extension("syntaxerror", kSyntaxErrorInExtensionSource));
+ const char* extension_names[] = {"syntaxerror"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
-static const char* kExceptionInExtensionSource =
- "throw 42";
+static const char* kExceptionInExtensionSource = "throw 42";
// Test that an exception when installing an extension does not cause
// a fatal error but results in an empty context.
TEST(ExceptionExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("exception",
- kExceptionInExtensionSource));
- const char* extension_names[] = { "exception" };
+ v8::RegisterExtension(
+ new Extension("exception", kExceptionInExtensionSource));
+ const char* extension_names[] = {"exception"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -7447,35 +6026,32 @@ static const char* kNativeCallTest =
// Test that a native runtime calls are supported in extensions.
TEST(NativeCallInExtensions) {
v8::HandleScope handle_scope(CcTest::isolate());
- v8::RegisterExtension(new Extension("nativecall",
- kNativeCallInExtensionSource));
- const char* extension_names[] = { "nativecall" };
+ v8::RegisterExtension(
+ new Extension("nativecall", kNativeCallInExtensionSource));
+ const char* extension_names[] = {"nativecall"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun(kNativeCallTest);
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 3));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 3)));
}
class NativeFunctionExtension : public Extension {
public:
- NativeFunctionExtension(const char* name,
- const char* source,
+ NativeFunctionExtension(const char* name, const char* source,
v8::FunctionCallback fun = &Echo)
- : Extension(name, source),
- function_(fun) { }
+ : Extension(name, source), function_(fun) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name) {
+ v8::Isolate* isolate, v8::Handle<v8::String> name) {
return v8::FunctionTemplate::New(isolate, function_);
}
static void Echo(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() >= 1) args.GetReturnValue().Set(args[0]);
}
+
private:
v8::FunctionCallback function_;
};
@@ -7484,15 +6060,14 @@ class NativeFunctionExtension : public Extension {
TEST(NativeFunctionDeclaration) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedecl";
- v8::RegisterExtension(new NativeFunctionExtension(name,
- "native function foo();"));
- const char* extension_names[] = { name };
+ v8::RegisterExtension(
+ new NativeFunctionExtension(name, "native function foo();"));
+ const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = CompileRun("foo(42);");
- CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
+ CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 42)));
}
@@ -7500,12 +6075,11 @@ TEST(NativeFunctionDeclarationError) {
v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedeclerr";
// Syntax error in extension code.
- v8::RegisterExtension(new NativeFunctionExtension(name,
- "native\nfunction foo();"));
- const char* extension_names[] = { name };
+ v8::RegisterExtension(
+ new NativeFunctionExtension(name, "native\nfunction foo();"));
+ const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -7515,13 +6089,11 @@ TEST(NativeFunctionDeclarationErrorEscape) {
const char* name = "nativedeclerresc";
// Syntax error in extension code - escape code in "native" means that
// it's not treated as a keyword.
- v8::RegisterExtension(new NativeFunctionExtension(
- name,
- "nativ\\u0065 function foo();"));
- const char* extension_names[] = { name };
+ v8::RegisterExtension(
+ new NativeFunctionExtension(name, "nativ\\u0065 function foo();"));
+ const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &extensions);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -7530,8 +6102,8 @@ static void CheckDependencies(const char* name, const char* expected) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::ExtensionConfiguration config(1, &name);
LocalContext context(&config);
- CHECK_EQ(String::NewFromUtf8(CcTest::isolate(), expected),
- context->Global()->Get(v8_str("loaded")));
+ CHECK(String::NewFromUtf8(CcTest::isolate(), expected)
+ ->Equals(context->Global()->Get(v8_str("loaded"))));
}
@@ -7543,11 +6115,11 @@ static void CheckDependencies(const char* name, const char* expected) {
* \-- C <--/
*/
THREADED_TEST(ExtensionDependency) {
- static const char* kEDeps[] = { "D" };
+ static const char* kEDeps[] = {"D"};
v8::RegisterExtension(new Extension("E", "this.loaded += 'E';", 1, kEDeps));
- static const char* kDDeps[] = { "B", "C" };
+ static const char* kDDeps[] = {"B", "C"};
v8::RegisterExtension(new Extension("D", "this.loaded += 'D';", 2, kDDeps));
- static const char* kBCDeps[] = { "A" };
+ static const char* kBCDeps[] = {"A"};
v8::RegisterExtension(new Extension("B", "this.loaded += 'B';", 1, kBCDeps));
v8::RegisterExtension(new Extension("C", "this.loaded += 'C';", 1, kBCDeps));
v8::RegisterExtension(new Extension("A", "this.loaded += 'A';"));
@@ -7557,22 +6129,23 @@ THREADED_TEST(ExtensionDependency) {
CheckDependencies("D", "undefinedABCD");
CheckDependencies("E", "undefinedABCDE");
v8::HandleScope handle_scope(CcTest::isolate());
- static const char* exts[2] = { "C", "E" };
+ static const char* exts[2] = {"C", "E"};
v8::ExtensionConfiguration config(2, exts);
LocalContext context(&config);
- CHECK_EQ(v8_str("undefinedACBDE"), context->Global()->Get(v8_str("loaded")));
+ CHECK(v8_str("undefinedACBDE")
+ ->Equals(context->Global()->Get(v8_str("loaded"))));
}
static const char* kExtensionTestScript =
- "native function A();"
- "native function B();"
- "native function C();"
- "function Foo(i) {"
- " if (i == 0) return A();"
- " if (i == 1) return B();"
- " if (i == 2) return C();"
- "}";
+ "native function A();"
+ "native function B();"
+ "native function C();"
+ "function Foo(i) {"
+ " if (i == 0) return A();"
+ " if (i == 1) return B();"
+ " if (i == 2) return C();"
+ "}";
static void CallFun(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -7588,10 +6161,9 @@ static void CallFun(const v8::FunctionCallbackInfo<v8::Value>& args) {
class FunctionExtension : public Extension {
public:
- FunctionExtension() : Extension("functiontest", kExtensionTestScript) { }
+ FunctionExtension() : Extension("functiontest", kExtensionTestScript) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<String> name);
+ v8::Isolate* isolate, v8::Handle<String> name);
};
@@ -7600,14 +6172,14 @@ v8::Handle<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Handle<String> name) {
lookup_count++;
if (name->Equals(v8_str("A"))) {
- return v8::FunctionTemplate::New(
- isolate, CallFun, v8::Integer::New(isolate, 8));
+ return v8::FunctionTemplate::New(isolate, CallFun,
+ v8::Integer::New(isolate, 8));
} else if (name->Equals(v8_str("B"))) {
- return v8::FunctionTemplate::New(
- isolate, CallFun, v8::Integer::New(isolate, 7));
+ return v8::FunctionTemplate::New(isolate, CallFun,
+ v8::Integer::New(isolate, 7));
} else if (name->Equals(v8_str("C"))) {
- return v8::FunctionTemplate::New(
- isolate, CallFun, v8::Integer::New(isolate, 6));
+ return v8::FunctionTemplate::New(isolate, CallFun,
+ v8::Integer::New(isolate, 6));
} else {
return v8::Handle<v8::FunctionTemplate>();
}
@@ -7617,34 +6189,31 @@ v8::Handle<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
THREADED_TEST(FunctionLookup) {
v8::RegisterExtension(new FunctionExtension());
v8::HandleScope handle_scope(CcTest::isolate());
- static const char* exts[1] = { "functiontest" };
+ static const char* exts[1] = {"functiontest"};
v8::ExtensionConfiguration config(1, exts);
LocalContext context(&config);
CHECK_EQ(3, lookup_count);
- CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
- CompileRun("Foo(0)"));
- CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
- CompileRun("Foo(1)"));
- CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
- CompileRun("Foo(2)"));
+ CHECK(v8::Integer::New(CcTest::isolate(), 8)->Equals(CompileRun("Foo(0)")));
+ CHECK(v8::Integer::New(CcTest::isolate(), 7)->Equals(CompileRun("Foo(1)")));
+ CHECK(v8::Integer::New(CcTest::isolate(), 6)->Equals(CompileRun("Foo(2)")));
}
THREADED_TEST(NativeFunctionConstructCall) {
v8::RegisterExtension(new FunctionExtension());
v8::HandleScope handle_scope(CcTest::isolate());
- static const char* exts[1] = { "functiontest" };
+ static const char* exts[1] = {"functiontest"};
v8::ExtensionConfiguration config(1, exts);
LocalContext context(&config);
for (int i = 0; i < 10; i++) {
// Run a few times to ensure that allocation of objects doesn't
// change behavior of a constructor function.
- CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
- CompileRun("(new A()).data"));
- CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
- CompileRun("(new B()).data"));
- CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
- CompileRun("(new C()).data"));
+ CHECK(v8::Integer::New(CcTest::isolate(), 8)
+ ->Equals(CompileRun("(new A()).data")));
+ CHECK(v8::Integer::New(CcTest::isolate(), 7)
+ ->Equals(CompileRun("(new B()).data")));
+ CHECK(v8::Integer::New(CcTest::isolate(), 6)
+ ->Equals(CompileRun("(new C()).data")));
}
}
@@ -7664,24 +6233,23 @@ void StoringErrorCallback(const char* location, const char* message) {
// unusable and therefore this test cannot be run in parallel.
TEST(ErrorReporting) {
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
- static const char* aDeps[] = { "B" };
+ static const char* aDeps[] = {"B"};
v8::RegisterExtension(new Extension("A", "", 1, aDeps));
- static const char* bDeps[] = { "A" };
+ static const char* bDeps[] = {"A"};
v8::RegisterExtension(new Extension("B", "", 1, bDeps));
last_location = NULL;
v8::ExtensionConfiguration config(1, bDeps);
- v8::Handle<Context> context =
- Context::New(CcTest::isolate(), &config);
+ v8::Handle<Context> context = Context::New(CcTest::isolate(), &config);
CHECK(context.IsEmpty());
- CHECK_NE(last_location, NULL);
+ CHECK(last_location);
}
static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
CHECK(message->GetScriptOrigin().ResourceName()->IsUndefined());
- CHECK_EQ(v8::Undefined(CcTest::isolate()),
- message->GetScriptOrigin().ResourceName());
+ CHECK(v8::Undefined(CcTest::isolate())
+ ->Equals(message->GetScriptOrigin().ResourceName()));
message->GetLineNumber();
message->GetSourceLine();
}
@@ -7797,9 +6365,11 @@ class Trivial2 {
void CheckInternalFields(
- const v8::InternalFieldsCallbackData<Trivial, Trivial2>& data) {
- Trivial* t1 = data.GetInternalField1();
- Trivial2* t2 = data.GetInternalField2();
+ const v8::PhantomCallbackData<v8::Persistent<v8::Object>>& data) {
+ v8::Persistent<v8::Object>* handle = data.GetParameter();
+ handle->Reset();
+ Trivial* t1 = reinterpret_cast<Trivial*>(data.GetInternalField1());
+ Trivial2* t2 = reinterpret_cast<Trivial2*>(data.GetInternalField2());
CHECK_EQ(42, t1->x());
CHECK_EQ(103, t2->x());
t1->set_x(1729);
@@ -7835,7 +6405,8 @@ void InternalFieldCallback(bool global_gc) {
reinterpret_cast<Trivial2*>(obj->GetAlignedPointerFromInternalField(1));
CHECK_EQ(103, t2->x());
- handle.SetPhantom(CheckInternalFields, 0, 1);
+ handle.SetPhantom<v8::Persistent<v8::Object>>(&handle, CheckInternalFields,
+ 0, 1);
if (!global_gc) {
handle.MarkIndependent();
}
@@ -7918,9 +6489,7 @@ THREADED_TEST(ResetWeakHandle) {
}
-static void InvokeScavenge() {
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
-}
+static void InvokeScavenge() { CcTest::heap()->CollectGarbage(i::NEW_SPACE); }
static void InvokeMarkSweep() {
@@ -8009,7 +6578,7 @@ THREADED_TEST(IndependentHandleRevival) {
v8::Local<v8::Object> o =
v8::Local<v8::Object>::New(isolate, object.handle);
v8::Local<String> y_str = v8_str("y");
- CHECK_EQ(v8::Integer::New(isolate, 1), o->Get(v8_str("x")));
+ CHECK(v8::Integer::New(isolate, 1)->Equals(o->Get(v8_str("x"))));
CHECK(o->Get(y_str)->Equals(y_str));
}
}
@@ -8022,12 +6591,12 @@ static void ArgumentsTestCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
v8::Isolate* isolate = args.GetIsolate();
- CHECK_EQ(args_fun, args.Callee());
+ CHECK(args_fun->Equals(args.Callee()));
CHECK_EQ(3, args.Length());
- CHECK_EQ(v8::Integer::New(isolate, 1), args[0]);
- CHECK_EQ(v8::Integer::New(isolate, 2), args[1]);
- CHECK_EQ(v8::Integer::New(isolate, 3), args[2]);
- CHECK_EQ(v8::Undefined(isolate), args[3]);
+ CHECK(v8::Integer::New(isolate, 1)->Equals(args[0]));
+ CHECK(v8::Integer::New(isolate, 2)->Equals(args[1]));
+ CHECK(v8::Integer::New(isolate, 3)->Equals(args[2]));
+ CHECK(v8::Undefined(isolate)->Equals(args[3]));
v8::HandleScope scope(args.GetIsolate());
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -8045,163 +6614,8 @@ THREADED_TEST(Arguments) {
}
-static void NoBlockGetterX(Local<Name> name,
- const v8::PropertyCallbackInfo<v8::Value>&) {}
-
-
-static void NoBlockGetterI(uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>&) {
-}
-
-
-static void PDeleter(Local<Name> name,
- const v8::PropertyCallbackInfo<v8::Boolean>& info) {
- if (!name->Equals(v8_str("foo"))) {
- return; // not intercepted
- }
-
- info.GetReturnValue().Set(false); // intercepted, don't delete the property
-}
-
-
-static void IDeleter(uint32_t index,
- const v8::PropertyCallbackInfo<v8::Boolean>& info) {
- if (index != 2) {
- return; // not intercepted
- }
-
- info.GetReturnValue().Set(false); // intercepted, don't delete the property
-}
-
-
-THREADED_TEST(Deleter) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX, NULL,
- NULL, PDeleter, NULL));
- obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- NoBlockGetterI, NULL, NULL, IDeleter, NULL));
- LocalContext context;
- context->Global()->Set(v8_str("k"), obj->NewInstance());
- CompileRun(
- "k.foo = 'foo';"
- "k.bar = 'bar';"
- "k[2] = 2;"
- "k[4] = 4;");
- CHECK(v8_compile("delete k.foo")->Run()->IsFalse());
- CHECK(v8_compile("delete k.bar")->Run()->IsTrue());
-
- CHECK_EQ(v8_compile("k.foo")->Run(), v8_str("foo"));
- CHECK(v8_compile("k.bar")->Run()->IsUndefined());
-
- CHECK(v8_compile("delete k[2]")->Run()->IsFalse());
- CHECK(v8_compile("delete k[4]")->Run()->IsTrue());
-
- CHECK_EQ(v8_compile("k[2]")->Run(), v8_num(2));
- CHECK(v8_compile("k[4]")->Run()->IsUndefined());
-}
-
-
-static void GetK(Local<Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (name->Equals(v8_str("foo")) ||
- name->Equals(v8_str("bar")) ||
- name->Equals(v8_str("baz"))) {
- info.GetReturnValue().SetUndefined();
- }
-}
-
-
-static void IndexedGetK(uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (index == 0 || index == 1) info.GetReturnValue().SetUndefined();
-}
-
-
-static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
- ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
- result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"));
- result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"));
- result->Set(v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"));
- info.GetReturnValue().Set(result);
-}
-
-
-static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
- ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
- result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("0"));
- result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("1"));
- info.GetReturnValue().Set(result);
-}
-
-
-THREADED_TEST(Enumerators) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
- obj->SetHandler(
- v8::NamedPropertyHandlerConfiguration(GetK, NULL, NULL, NULL, NamedEnum));
- obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- IndexedGetK, NULL, NULL, NULL, IndexedEnum));
- LocalContext context;
- context->Global()->Set(v8_str("k"), obj->NewInstance());
- v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
- "k[10] = 0;"
- "k.a = 0;"
- "k[5] = 0;"
- "k.b = 0;"
- "k[4294967295] = 0;"
- "k.c = 0;"
- "k[4294967296] = 0;"
- "k.d = 0;"
- "k[140000] = 0;"
- "k.e = 0;"
- "k[30000000000] = 0;"
- "k.f = 0;"
- "var result = [];"
- "for (var prop in k) {"
- " result.push(prop);"
- "}"
- "result"));
- // Check that we get all the property names returned including the
- // ones from the enumerators in the right order: indexed properties
- // in numerical order, indexed interceptor properties, named
- // properties in insertion order, named interceptor properties.
- // This order is not mandated by the spec, so this test is just
- // documenting our behavior.
- CHECK_EQ(17, result->Length());
- // Indexed properties in numerical order.
- CHECK_EQ(v8_str("5"), result->Get(v8::Integer::New(isolate, 0)));
- CHECK_EQ(v8_str("10"), result->Get(v8::Integer::New(isolate, 1)));
- CHECK_EQ(v8_str("140000"), result->Get(v8::Integer::New(isolate, 2)));
- CHECK_EQ(v8_str("4294967295"), result->Get(v8::Integer::New(isolate, 3)));
- // Indexed interceptor properties in the order they are returned
- // from the enumerator interceptor.
- CHECK_EQ(v8_str("0"), result->Get(v8::Integer::New(isolate, 4)));
- CHECK_EQ(v8_str("1"), result->Get(v8::Integer::New(isolate, 5)));
- // Named properties in insertion order.
- CHECK_EQ(v8_str("a"), result->Get(v8::Integer::New(isolate, 6)));
- CHECK_EQ(v8_str("b"), result->Get(v8::Integer::New(isolate, 7)));
- CHECK_EQ(v8_str("c"), result->Get(v8::Integer::New(isolate, 8)));
- CHECK_EQ(v8_str("4294967296"), result->Get(v8::Integer::New(isolate, 9)));
- CHECK_EQ(v8_str("d"), result->Get(v8::Integer::New(isolate, 10)));
- CHECK_EQ(v8_str("e"), result->Get(v8::Integer::New(isolate, 11)));
- CHECK_EQ(v8_str("30000000000"), result->Get(v8::Integer::New(isolate, 12)));
- CHECK_EQ(v8_str("f"), result->Get(v8::Integer::New(isolate, 13)));
- // Named interceptor properties.
- CHECK_EQ(v8_str("foo"), result->Get(v8::Integer::New(isolate, 14)));
- CHECK_EQ(v8_str("bar"), result->Get(v8::Integer::New(isolate, 15)));
- CHECK_EQ(v8_str("baz"), result->Get(v8::Integer::New(isolate, 16)));
-}
-
-
-int p_getter_count;
-int p_getter_count2;
+static int p_getter_count;
+static int p_getter_count2;
static void PGetter(Local<String> name,
@@ -8210,15 +6624,15 @@ static void PGetter(Local<String> name,
p_getter_count++;
v8::Handle<v8::Object> global =
info.GetIsolate()->GetCurrentContext()->Global();
- CHECK_EQ(info.Holder(), global->Get(v8_str("o1")));
+ CHECK(info.Holder()->Equals(global->Get(v8_str("o1"))));
if (name->Equals(v8_str("p1"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o1")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o1"))));
} else if (name->Equals(v8_str("p2"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o2")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o2"))));
} else if (name->Equals(v8_str("p3"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o3")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o3"))));
} else if (name->Equals(v8_str("p4"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o4")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o4"))));
}
}
@@ -8245,15 +6659,15 @@ static void PGetter2(Local<Name> name,
p_getter_count2++;
v8::Handle<v8::Object> global =
info.GetIsolate()->GetCurrentContext()->Global();
- CHECK_EQ(info.Holder(), global->Get(v8_str("o1")));
+ CHECK(info.Holder()->Equals(global->Get(v8_str("o1"))));
if (name->Equals(v8_str("p1"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o1")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o1"))));
} else if (name->Equals(v8_str("p2"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o2")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o2"))));
} else if (name->Equals(v8_str("p3"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o3")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o3"))));
} else if (name->Equals(v8_str("p4"))) {
- CHECK_EQ(info.This(), global->Get(v8_str("o4")));
+ CHECK(info.This()->Equals(global->Get(v8_str("o4"))));
}
}
@@ -8293,11 +6707,11 @@ THREADED_TEST(ObjectInstantiation) {
for (int i = 0; i < 100; i++) {
v8::HandleScope inner_scope(CcTest::isolate());
v8::Handle<v8::Object> obj = templ->NewInstance();
- CHECK_NE(obj, context->Global()->Get(v8_str("o")));
+ CHECK(!obj->Equals(context->Global()->Get(v8_str("o"))));
context->Global()->Set(v8_str("o2"), obj);
v8::Handle<Value> value =
CompileRun("o.__proto__ === o2.__proto__");
- CHECK_EQ(v8::True(isolate), value);
+ CHECK(v8::True(isolate)->Equals(value));
context->Global()->Set(v8_str("o"), obj);
}
}
@@ -8721,14 +7135,14 @@ static void WriteUtf8Helper(
uint16_t lead = StringGet(string, nchars - 2);
if (((lead & 0xfc00) == 0xd800) &&
((trail & 0xfc00) == 0xdc00)) {
- unsigned char u1 = buffer2[utf8_written2 - 4];
- unsigned char u2 = buffer2[utf8_written2 - 3];
- unsigned char u3 = buffer2[utf8_written2 - 2];
- unsigned char u4 = buffer2[utf8_written2 - 1];
- CHECK_EQ((u1 & 0xf8), 0xf0);
- CHECK_EQ((u2 & 0xc0), 0x80);
- CHECK_EQ((u3 & 0xc0), 0x80);
- CHECK_EQ((u4 & 0xc0), 0x80);
+ unsigned u1 = buffer2[utf8_written2 - 4];
+ unsigned u2 = buffer2[utf8_written2 - 3];
+ unsigned u3 = buffer2[utf8_written2 - 2];
+ unsigned u4 = buffer2[utf8_written2 - 1];
+ CHECK_EQ((u1 & 0xf8), 0xf0u);
+ CHECK_EQ((u2 & 0xc0), 0x80u);
+ CHECK_EQ((u3 & 0xc0), 0x80u);
+ CHECK_EQ((u4 & 0xc0), 0x80u);
uint32_t c = 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
CHECK_EQ((u4 & 0x3f), (c & 0x3f));
CHECK_EQ((u3 & 0x3f), ((c >> 6) & 0x3f));
@@ -9039,7 +7453,7 @@ THREADED_TEST(DeleteAccessor) {
context->Global()->Set(v8_str("holder"), holder);
v8::Handle<Value> result = CompileRun(
"holder.y = 11; holder.y = 12; holder.y");
- CHECK_EQ(12, result->Uint32Value());
+ CHECK_EQ(12u, result->Uint32Value());
}
@@ -9159,10 +7573,10 @@ static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
v8::Handle<v8::Value> name_val = message->GetScriptOrigin().ResourceName();
CHECK(!name_val.IsEmpty() && name_val->IsString());
v8::String::Utf8Value name(message->GetScriptOrigin().ResourceName());
- CHECK_EQ(script_resource_name, *name);
+ CHECK_EQ(0, strcmp(script_resource_name, *name));
CHECK_EQ(3, message->GetLineNumber());
v8::String::Utf8Value source_line(message->GetSourceLine());
- CHECK_EQ(" new o.foo();", *source_line);
+ CHECK_EQ(0, strcmp(" new o.foo();", *source_line));
}
@@ -9195,7 +7609,7 @@ TEST(CompilationErrorUsingTryCatchHandler) {
v8::HandleScope scope(env->GetIsolate());
v8::TryCatch try_catch;
v8_compile("This doesn't &*&@#$&*^ compile.");
- CHECK_NE(NULL, *try_catch.Exception());
+ CHECK(*try_catch.Exception());
CHECK(try_catch.HasCaught());
}
@@ -9243,7 +7657,7 @@ TEST(TryCatchFinallyStoresMessageUsingTryCatchHandler) {
CHECK(try_catch.HasCaught());
CHECK(!try_catch.Message().IsEmpty());
String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(*exception_value, "1");
+ CHECK_EQ(0, strcmp(*exception_value, "1"));
try_catch.Reset();
CompileRun("try {"
" CEvaluate('throw 1;');"
@@ -9253,7 +7667,7 @@ TEST(TryCatchFinallyStoresMessageUsingTryCatchHandler) {
CHECK(try_catch.HasCaught());
CHECK(!try_catch.Message().IsEmpty());
String::Utf8Value finally_exception_value(try_catch.Exception());
- CHECK_EQ(*finally_exception_value, "2");
+ CHECK_EQ(0, strcmp(*finally_exception_value, "2"));
}
@@ -9646,7 +8060,7 @@ TEST(ContextDetachGlobal) {
env3->Enter();
Local<v8::Object> global3 = env3->Global();
- CHECK_EQ(global2, global3);
+ CHECK(global2->Equals(global3));
CHECK(global3->Get(v8_str("prop"))->IsUndefined());
CHECK(global3->Get(v8_str("getProp"))->IsUndefined());
global3->Set(v8_str("prop"), v8::Integer::New(env3->GetIsolate(), -1));
@@ -9712,7 +8126,7 @@ TEST(DetachGlobal) {
0,
v8::Handle<v8::ObjectTemplate>(),
global2);
- CHECK_EQ(global2, env3->Global());
+ CHECK(global2->Equals(env3->Global()));
// Start by using the same security token for env3 as for env1 and env2.
env3->SetSecurityToken(foo);
@@ -9786,13 +8200,13 @@ TEST(DetachedAccesses) {
Local<Value> result;
result = CompileRun("bound_x()");
- CHECK_EQ(v8_str("env2_x"), result);
+ CHECK(v8_str("env2_x")->Equals(result));
result = CompileRun("get_x()");
CHECK(result.IsEmpty());
result = CompileRun("get_x_w()");
CHECK(result.IsEmpty());
result = CompileRun("this_x()");
- CHECK_EQ(v8_str("env2_x"), result);
+ CHECK(v8_str("env2_x")->Equals(result));
// Reattach env2's proxy
env2 = Context::New(env1->GetIsolate(),
@@ -9814,12 +8228,12 @@ TEST(DetachedAccesses) {
"}"
"results");
Local<v8::Array> results = Local<v8::Array>::Cast(result);
- CHECK_EQ(16, results->Length());
+ CHECK_EQ(16u, results->Length());
for (int i = 0; i < 16; i += 4) {
- CHECK_EQ(v8_str("env2_x"), results->Get(i + 0));
- CHECK_EQ(v8_str("env1_x"), results->Get(i + 1));
- CHECK_EQ(v8_str("env3_x"), results->Get(i + 2));
- CHECK_EQ(v8_str("env2_x"), results->Get(i + 3));
+ CHECK(v8_str("env2_x")->Equals(results->Get(i + 0)));
+ CHECK(v8_str("env1_x")->Equals(results->Get(i + 1)));
+ CHECK(v8_str("env3_x")->Equals(results->Get(i + 2)));
+ CHECK(v8_str("env2_x")->Equals(results->Get(i + 3)));
}
}
@@ -9833,12 +8247,12 @@ TEST(DetachedAccesses) {
"}"
"results");
Local<v8::Array> results = Local<v8::Array>::Cast(result);
- CHECK_EQ(16, results->Length());
+ CHECK_EQ(16u, results->Length());
for (int i = 0; i < 16; i += 4) {
- CHECK_EQ(v8_str("env2_x"), results->Get(i + 0));
- CHECK_EQ(v8_str("env3_x"), results->Get(i + 1));
- CHECK_EQ(v8_str("env3_x"), results->Get(i + 2));
- CHECK_EQ(v8_str("env2_x"), results->Get(i + 3));
+ CHECK(v8_str("env2_x")->Equals(results->Get(i + 0)));
+ CHECK(v8_str("env3_x")->Equals(results->Get(i + 1)));
+ CHECK(v8_str("env3_x")->Equals(results->Get(i + 2)));
+ CHECK(v8_str("env2_x")->Equals(results->Get(i + 3)));
}
result = CompileRun(
@@ -9851,12 +8265,12 @@ TEST(DetachedAccesses) {
"}"
"results");
results = Local<v8::Array>::Cast(result);
- CHECK_EQ(16, results->Length());
+ CHECK_EQ(16u, results->Length());
for (int i = 0; i < 16; i += 4) {
- CHECK_EQ(v8_str("env2_x"), results->Get(i + 0));
- CHECK_EQ(v8_str("env1_x"), results->Get(i + 1));
- CHECK_EQ(v8_str("env3_x"), results->Get(i + 2));
- CHECK_EQ(v8_str("env2_x"), results->Get(i + 3));
+ CHECK(v8_str("env2_x")->Equals(results->Get(i + 0)));
+ CHECK(v8_str("env1_x")->Equals(results->Get(i + 1)));
+ CHECK(v8_str("env3_x")->Equals(results->Get(i + 2)));
+ CHECK(v8_str("env2_x")->Equals(results->Get(i + 3)));
}
}
@@ -10220,7 +8634,9 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
TEST(SuperAccessControl) {
+ i::FLAG_allow_natives_syntax = true;
i::FLAG_harmony_classes = true;
+ i::FLAG_harmony_object_literals = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> obj_template =
@@ -10233,8 +8649,8 @@ TEST(SuperAccessControl) {
{
v8::TryCatch try_catch;
CompileRun(
- "function f() { return super.hasOwnProperty; };"
- "var m = f.toMethod(prohibited);"
+ "var f = { m() { return super.hasOwnProperty; } }.m;"
+ "var m = %ToMethod(f, prohibited);"
"m();");
CHECK(try_catch.HasCaught());
}
@@ -10242,8 +8658,8 @@ TEST(SuperAccessControl) {
{
v8::TryCatch try_catch;
CompileRun(
- "function f() { return super[42]; };"
- "var m = f.toMethod(prohibited);"
+ "var f = {m() { return super[42]; } }.m;"
+ "var m = %ToMethod(f, prohibited);"
"m();");
CHECK(try_catch.HasCaught());
}
@@ -10251,8 +8667,8 @@ TEST(SuperAccessControl) {
{
v8::TryCatch try_catch;
CompileRun(
- "function f() { super.hasOwnProperty = function () {}; };"
- "var m = f.toMethod(prohibited);"
+ "var f = {m() { super.hasOwnProperty = function () {}; } }.m;"
+ "var m = %ToMethod(f, prohibited);"
"m();");
CHECK(try_catch.HasCaught());
}
@@ -10261,78 +8677,19 @@ TEST(SuperAccessControl) {
v8::TryCatch try_catch;
CompileRun(
"Object.defineProperty(Object.prototype, 'x', { set : function(){}});"
- "function f() { "
- " 'use strict';"
- " super.x = function () {}; "
- "};"
- "var m = f.toMethod(prohibited);"
+ "var f = {"
+ " m() { "
+ " 'use strict';"
+ " super.x = function () {};"
+ " }"
+ "}.m;"
+ "var m = %ToMethod(f, prohibited);"
"m();");
CHECK(try_catch.HasCaught());
}
}
-static void IndexedPropertyEnumerator(
- const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 1);
- result->Set(0, v8::Integer::New(info.GetIsolate(), 7));
- info.GetReturnValue().Set(result);
-}
-
-
-static void NamedPropertyEnumerator(
- const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
- result->Set(0, v8_str("x"));
- result->Set(1, v8::Symbol::GetIterator(info.GetIsolate()));
- info.GetReturnValue().Set(result);
-}
-
-
-THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
-
- obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
- obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
- obj_template->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- NULL, NULL, NULL, NULL, IndexedPropertyEnumerator));
- obj_template->SetHandler(v8::NamedPropertyHandlerConfiguration(
- NULL, NULL, NULL, NULL, NamedPropertyEnumerator));
-
- LocalContext context;
- v8::Handle<v8::Object> global = context->Global();
- global->Set(v8_str("object"), obj_template->NewInstance());
-
- v8::Handle<v8::Value> result =
- CompileRun("Object.getOwnPropertyNames(object)");
- CHECK(result->IsArray());
- v8::Handle<v8::Array> result_array = v8::Handle<v8::Array>::Cast(result);
- CHECK_EQ(2, result_array->Length());
- CHECK(result_array->Get(0)->IsString());
- CHECK(result_array->Get(1)->IsString());
- CHECK_EQ(v8_str("7"), result_array->Get(0));
- CHECK_EQ(v8_str("x"), result_array->Get(1));
-
- result = CompileRun("var ret = []; for (var k in object) ret.push(k); ret");
- CHECK(result->IsArray());
- result_array = v8::Handle<v8::Array>::Cast(result);
- CHECK_EQ(2, result_array->Length());
- CHECK(result_array->Get(0)->IsString());
- CHECK(result_array->Get(1)->IsString());
- CHECK_EQ(v8_str("7"), result_array->Get(0));
- CHECK_EQ(v8_str("x"), result_array->Get(1));
-
- result = CompileRun("Object.getOwnPropertySymbols(object)");
- CHECK(result->IsArray());
- result_array = v8::Handle<v8::Array>::Cast(result);
- CHECK_EQ(1, result_array->Length());
- CHECK_EQ(result_array->Get(0), v8::Symbol::GetIterator(isolate));
-}
-
-
static void ConstTenGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(v8_num(10));
@@ -10602,104 +8959,7 @@ THREADED_TEST(AccessControlFlatten) {
}
-static void AccessControlNamedGetter(
- Local<Name>, const v8::PropertyCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(42);
-}
-
-
-static void AccessControlNamedSetter(
- Local<Name>, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(value);
-}
-
-
-static void AccessControlIndexedGetter(
- uint32_t index,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(v8_num(42));
-}
-
-
-static void AccessControlIndexedSetter(
- uint32_t,
- Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(value);
-}
-
-
-THREADED_TEST(AccessControlInterceptorIC) {
- named_access_count = 0;
- indexed_access_count = 0;
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
-
- // Create an environment.
- v8::Local<Context> context0 = Context::New(isolate);
- context0->Enter();
-
- // Create an object that requires access-check functions to be
- // called for cross-domain access. The object also has interceptors
- // interceptor.
- v8::Handle<v8::ObjectTemplate> object_template =
- v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallbacks(NamedAccessCounter,
- IndexedAccessCounter);
- object_template->SetHandler(v8::NamedPropertyHandlerConfiguration(
- AccessControlNamedGetter, AccessControlNamedSetter));
- object_template->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- AccessControlIndexedGetter, AccessControlIndexedSetter));
- Local<v8::Object> object = object_template->NewInstance();
-
- v8::HandleScope scope1(isolate);
-
- // Create another environment.
- v8::Local<Context> context1 = Context::New(isolate);
- context1->Enter();
-
- // Make easy access to the object from the other environment.
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("obj"), object);
-
- v8::Handle<Value> value;
-
- // Check that the named access-control function is called every time
- // eventhough there is an interceptor on the object.
- value = v8_compile("for (var i = 0; i < 10; i++) obj.x = 1;")->Run();
- value = v8_compile("for (var i = 0; i < 10; i++) obj.x;"
- "obj.x")->Run();
- CHECK(value->IsNumber());
- CHECK_EQ(42, value->Int32Value());
- CHECK_EQ(21, named_access_count);
-
- value = v8_compile("var p = 'x';")->Run();
- value = v8_compile("for (var i = 0; i < 10; i++) obj[p] = 1;")->Run();
- value = v8_compile("for (var i = 0; i < 10; i++) obj[p];"
- "obj[p]")->Run();
- CHECK(value->IsNumber());
- CHECK_EQ(42, value->Int32Value());
- CHECK_EQ(42, named_access_count);
-
- // Check that the indexed access-control function is called every
- // time eventhough there is an interceptor on the object.
- value = v8_compile("for (var i = 0; i < 10; i++) obj[0] = 1;")->Run();
- value = v8_compile("for (var i = 0; i < 10; i++) obj[0];"
- "obj[0]")->Run();
- CHECK(value->IsNumber());
- CHECK_EQ(42, value->Int32Value());
- CHECK_EQ(21, indexed_access_count);
-
- context1->Exit();
- context0->Exit();
-}
-
-
-THREADED_TEST(Version) {
- v8::V8::GetVersion();
-}
+THREADED_TEST(Version) { v8::V8::GetVersion(); }
static void InstanceFunctionCallback(
@@ -11067,20 +9327,20 @@ THREADED_TEST(SetPrototype) {
// object.
Local<Value> proto = o0->Get(v8_str("__proto__"));
CHECK(proto->IsObject());
- CHECK_EQ(proto.As<v8::Object>(), o3);
+ CHECK(proto.As<v8::Object>()->Equals(o3));
// However, Object::GetPrototype ignores hidden prototype.
Local<Value> proto0 = o0->GetPrototype();
CHECK(proto0->IsObject());
- CHECK_EQ(proto0.As<v8::Object>(), o1);
+ CHECK(proto0.As<v8::Object>()->Equals(o1));
Local<Value> proto1 = o1->GetPrototype();
CHECK(proto1->IsObject());
- CHECK_EQ(proto1.As<v8::Object>(), o2);
+ CHECK(proto1.As<v8::Object>()->Equals(o2));
Local<Value> proto2 = o2->GetPrototype();
CHECK(proto2->IsObject());
- CHECK_EQ(proto2.As<v8::Object>(), o3);
+ CHECK(proto2.As<v8::Object>()->Equals(o3));
}
@@ -11277,11 +9537,11 @@ THREADED_TEST(GetterSetterExceptions) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope handle_scope(isolate);
CompileRun(
- "function Foo() { };"
- "function Throw() { throw 5; };"
- "var x = { };"
- "x.__defineSetter__('set', Throw);"
- "x.__defineGetter__('get', Throw);");
+ "function Foo() { };"
+ "function Throw() { throw 5; };"
+ "var x = { };"
+ "x.__defineSetter__('set', Throw);"
+ "x.__defineGetter__('get', Throw);");
Local<v8::Object> x =
Local<v8::Object>::Cast(context->Global()->Get(v8_str("x")));
v8::TryCatch try_catch;
@@ -11345,7 +9605,8 @@ THREADED_TEST(ConstructorForObject) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope handle_scope(isolate);
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
+ {
+ Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(ConstructorCallback);
Local<Object> instance = instance_template->NewInstance();
context->Global()->Set(v8_str("obj"), instance);
@@ -11359,7 +9620,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(value->IsInt32());
CHECK_EQ(28, value->Int32Value());
- Local<Value> args1[] = { v8_num(28) };
+ Local<Value> args1[] = {v8_num(28)};
Local<Value> value_obj1 = instance->CallAsConstructor(1, args1);
CHECK(value_obj1->IsObject());
Local<Object> object1 = Local<Object>::Cast(value_obj1);
@@ -11369,14 +9630,14 @@ THREADED_TEST(ConstructorForObject) {
CHECK_EQ(28, value->Int32Value());
// Call the Object's constructor with a String.
- value = CompileRun(
- "(function() { var o = new obj('tipli'); return o.a; })()");
+ value =
+ CompileRun("(function() { var o = new obj('tipli'); return o.a; })()");
CHECK(!try_catch.HasCaught());
CHECK(value->IsString());
String::Utf8Value string_value1(value->ToString(isolate));
- CHECK_EQ("tipli", *string_value1);
+ CHECK_EQ(0, strcmp("tipli", *string_value1));
- Local<Value> args2[] = { v8_str("tipli") };
+ Local<Value> args2[] = {v8_str("tipli")};
Local<Value> value_obj2 = instance->CallAsConstructor(1, args2);
CHECK(value_obj2->IsObject());
Local<Object> object2 = Local<Object>::Cast(value_obj2);
@@ -11384,7 +9645,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(!try_catch.HasCaught());
CHECK(value->IsString());
String::Utf8Value string_value2(value->ToString(isolate));
- CHECK_EQ("tipli", *string_value2);
+ CHECK_EQ(0, strcmp("tipli", *string_value2));
// Call the Object's constructor with a Boolean.
value = CompileRun("(function() { var o = new obj(true); return o.a; })()");
@@ -11392,7 +9653,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(value->IsBoolean());
CHECK_EQ(true, value->BooleanValue());
- Handle<Value> args3[] = { v8::True(isolate) };
+ Handle<Value> args3[] = {v8::True(isolate)};
Local<Value> value_obj3 = instance->CallAsConstructor(1, args3);
CHECK(value_obj3->IsObject());
Local<Object> object3 = Local<Object>::Cast(value_obj3);
@@ -11402,7 +9663,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK_EQ(true, value->BooleanValue());
// Call the Object's constructor with undefined.
- Handle<Value> args4[] = { v8::Undefined(isolate) };
+ Handle<Value> args4[] = {v8::Undefined(isolate)};
Local<Value> value_obj4 = instance->CallAsConstructor(1, args4);
CHECK(value_obj4->IsObject());
Local<Object> object4 = Local<Object>::Cast(value_obj4);
@@ -11411,7 +9672,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(value->IsUndefined());
// Call the Object's constructor with null.
- Handle<Value> args5[] = { v8::Null(isolate) };
+ Handle<Value> args5[] = {v8::Null(isolate)};
Local<Value> value_obj5 = instance->CallAsConstructor(1, args5);
CHECK(value_obj5->IsObject());
Local<Object> object5 = Local<Object>::Cast(value_obj5);
@@ -11421,7 +9682,8 @@ THREADED_TEST(ConstructorForObject) {
}
// Check exception handling when there is no constructor set for the Object.
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
+ {
+ Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
Local<Object> instance = instance_template->NewInstance();
context->Global()->Set(v8_str("obj2"), instance);
v8::TryCatch try_catch;
@@ -11431,19 +9693,21 @@ THREADED_TEST(ConstructorForObject) {
value = CompileRun("new obj2(28)");
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value1(try_catch.Exception());
- CHECK_EQ("TypeError: object is not a function", *exception_value1);
+ CHECK_EQ(0, strcmp("TypeError: obj2 is not a function", *exception_value1));
try_catch.Reset();
- Local<Value> args[] = { v8_num(29) };
+ Local<Value> args[] = {v8_num(29)};
value = instance->CallAsConstructor(1, args);
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
- CHECK_EQ("TypeError: #<Object> is not a function", *exception_value2);
+ CHECK_EQ(
+ 0, strcmp("TypeError: #<Object> is not a function", *exception_value2));
try_catch.Reset();
}
// Check the case when constructor throws exception.
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
+ {
+ Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(ThrowValue);
Local<Object> instance = instance_template->NewInstance();
context->Global()->Set(v8_str("obj3"), instance);
@@ -11454,19 +9718,20 @@ THREADED_TEST(ConstructorForObject) {
value = CompileRun("new obj3(22)");
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value1(try_catch.Exception());
- CHECK_EQ("22", *exception_value1);
+ CHECK_EQ(0, strcmp("22", *exception_value1));
try_catch.Reset();
- Local<Value> args[] = { v8_num(23) };
+ Local<Value> args[] = {v8_num(23)};
value = instance->CallAsConstructor(1, args);
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
- CHECK_EQ("23", *exception_value2);
+ CHECK_EQ(0, strcmp("23", *exception_value2));
try_catch.Reset();
}
// Check whether constructor returns with an object or non-object.
- { Local<FunctionTemplate> function_template =
+ {
+ Local<FunctionTemplate> function_template =
FunctionTemplate::New(isolate, FakeConstructorCallback);
Local<Function> function = function_template->GetFunction();
Local<Object> instance1 = function;
@@ -11482,7 +9747,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(!try_catch.HasCaught());
CHECK(value->IsObject());
- Local<Value> args1[] = { v8_num(28) };
+ Local<Value> args1[] = {v8_num(28)};
value = instance1->CallAsConstructor(1, args1);
CHECK(!try_catch.HasCaught());
CHECK(value->IsObject());
@@ -11500,7 +9765,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(!try_catch.HasCaught());
CHECK(!value->IsObject());
- Local<Value> args2[] = { v8_num(28) };
+ Local<Value> args2[] = {v8_num(28)};
value = instance2->CallAsConstructor(1, args2);
CHECK(!try_catch.HasCaught());
CHECK(!value->IsObject());
@@ -11517,19 +9782,19 @@ THREADED_TEST(FunctionDescriptorException) {
Local<Function> cons = templ->GetFunction();
context->Global()->Set(v8_str("Fun"), cons);
Local<Value> value = CompileRun(
- "function test() {"
- " try {"
- " (new Fun()).blah()"
- " } catch (e) {"
- " var str = String(e);"
- // " if (str.indexOf('TypeError') == -1) return 1;"
- // " if (str.indexOf('[object Fun]') != -1) return 2;"
- // " if (str.indexOf('#<Fun>') == -1) return 3;"
- " return 0;"
- " }"
- " return 4;"
- "}"
- "test();");
+ "function test() {"
+ " try {"
+ " (new Fun()).blah()"
+ " } catch (e) {"
+ " var str = String(e);"
+ // " if (str.indexOf('TypeError') == -1) return 1;"
+ // " if (str.indexOf('[object Fun]') != -1) return 2;"
+ // " if (str.indexOf('#<Fun>') == -1) return 3;"
+ " return 0;"
+ " }"
+ " return 4;"
+ "}"
+ "test();");
CHECK_EQ(0, value->Int32Value());
}
@@ -11656,12 +9921,12 @@ THREADED_TEST(EvalInDetachedGlobal) {
// Set up function in context0 that uses eval from context0.
context0->Enter();
- v8::Handle<v8::Value> fun =
- CompileRun("var x = 42;"
- "(function() {"
- " var e = eval;"
- " return function(s) { return e(s); }"
- "})()");
+ v8::Handle<v8::Value> fun = CompileRun(
+ "var x = 42;"
+ "(function() {"
+ " var e = eval;"
+ " return function(s) { return e(s); }"
+ "})()");
context0->Exit();
// Put the function into context1 and call it before and after
@@ -11724,7 +9989,8 @@ THREADED_TEST(CallAsFunction) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ {
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(call_as_function);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -11746,8 +10012,9 @@ THREADED_TEST(CallAsFunction) {
CHECK(!try_catch.HasCaught());
CHECK_EQ(45, value->Int32Value());
- value = CompileRun("obj.call = Function.prototype.call;"
- "obj.call(null, 87)");
+ value = CompileRun(
+ "obj.call = Function.prototype.call;"
+ "obj.call(null, 87)");
CHECK(!try_catch.HasCaught());
CHECK_EQ(87, value->Int32Value());
@@ -11771,13 +10038,14 @@ THREADED_TEST(CallAsFunction) {
// Check that the call-as-function handler can be called through
// the API.
- v8::Handle<Value> args[] = { v8_num(28) };
+ v8::Handle<Value> args[] = {v8_num(28)};
value = instance->CallAsFunction(instance, 1, args);
CHECK(!try_catch.HasCaught());
CHECK_EQ(28, value->Int32Value());
}
- { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ {
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template(t->InstanceTemplate());
USE(instance_template);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -11792,22 +10060,23 @@ THREADED_TEST(CallAsFunction) {
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value1(try_catch.Exception());
// TODO(verwaest): Better message
- CHECK_EQ("TypeError: object is not a function",
- *exception_value1);
+ CHECK_EQ(0, strcmp("TypeError: obj2 is not a function", *exception_value1));
try_catch.Reset();
// Call an object without call-as-function handler through the API
value = CompileRun("obj2(28)");
- v8::Handle<Value> args[] = { v8_num(28) };
+ v8::Handle<Value> args[] = {v8_num(28)};
value = instance->CallAsFunction(instance, 1, args);
CHECK(value.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
- CHECK_EQ("TypeError: [object Object] is not a function", *exception_value2);
+ CHECK_EQ(0, strcmp("TypeError: [object Object] is not a function",
+ *exception_value2));
try_catch.Reset();
}
- { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ {
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(ThrowValue);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -11820,18 +10089,19 @@ THREADED_TEST(CallAsFunction) {
value = CompileRun("obj3(22)");
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value1(try_catch.Exception());
- CHECK_EQ("22", *exception_value1);
+ CHECK_EQ(0, strcmp("22", *exception_value1));
try_catch.Reset();
- v8::Handle<Value> args[] = { v8_num(23) };
+ v8::Handle<Value> args[] = {v8_num(23)};
value = instance->CallAsFunction(instance, 1, args);
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
- CHECK_EQ("23", *exception_value2);
+ CHECK_EQ(0, strcmp("23", *exception_value2));
try_catch.Reset();
}
- { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ {
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(ReturnThis);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
@@ -11839,34 +10109,29 @@ THREADED_TEST(CallAsFunction) {
Local<v8::Value> a1 =
instance->CallAsFunction(v8::Undefined(isolate), 0, NULL);
CHECK(a1->StrictEquals(instance));
- Local<v8::Value> a2 =
- instance->CallAsFunction(v8::Null(isolate), 0, NULL);
+ Local<v8::Value> a2 = instance->CallAsFunction(v8::Null(isolate), 0, NULL);
CHECK(a2->StrictEquals(instance));
- Local<v8::Value> a3 =
- instance->CallAsFunction(v8_num(42), 0, NULL);
+ Local<v8::Value> a3 = instance->CallAsFunction(v8_num(42), 0, NULL);
CHECK(a3->StrictEquals(instance));
- Local<v8::Value> a4 =
- instance->CallAsFunction(v8_str("hello"), 0, NULL);
+ Local<v8::Value> a4 = instance->CallAsFunction(v8_str("hello"), 0, NULL);
CHECK(a4->StrictEquals(instance));
- Local<v8::Value> a5 =
- instance->CallAsFunction(v8::True(isolate), 0, NULL);
+ Local<v8::Value> a5 = instance->CallAsFunction(v8::True(isolate), 0, NULL);
CHECK(a5->StrictEquals(instance));
}
- { CompileRun(
- "function ReturnThisSloppy() {"
- " return this;"
- "}"
- "function ReturnThisStrict() {"
- " 'use strict';"
- " return this;"
- "}");
- Local<Function> ReturnThisSloppy =
- Local<Function>::Cast(
- context->Global()->Get(v8_str("ReturnThisSloppy")));
- Local<Function> ReturnThisStrict =
- Local<Function>::Cast(
- context->Global()->Get(v8_str("ReturnThisStrict")));
+ {
+ CompileRun(
+ "function ReturnThisSloppy() {"
+ " return this;"
+ "}"
+ "function ReturnThisStrict() {"
+ " 'use strict';"
+ " return this;"
+ "}");
+ Local<Function> ReturnThisSloppy = Local<Function>::Cast(
+ context->Global()->Get(v8_str("ReturnThisSloppy")));
+ Local<Function> ReturnThisStrict = Local<Function>::Cast(
+ context->Global()->Get(v8_str("ReturnThisStrict")));
Local<v8::Value> a1 =
ReturnThisSloppy->CallAsFunction(v8::Undefined(isolate), 0, NULL);
@@ -11874,8 +10139,7 @@ THREADED_TEST(CallAsFunction) {
Local<v8::Value> a2 =
ReturnThisSloppy->CallAsFunction(v8::Null(isolate), 0, NULL);
CHECK(a2->StrictEquals(context->Global()));
- Local<v8::Value> a3 =
- ReturnThisSloppy->CallAsFunction(v8_num(42), 0, NULL);
+ Local<v8::Value> a3 = ReturnThisSloppy->CallAsFunction(v8_num(42), 0, NULL);
CHECK(a3->IsNumberObject());
CHECK_EQ(42.0, a3.As<v8::NumberObject>()->ValueOf());
Local<v8::Value> a4 =
@@ -11893,8 +10157,7 @@ THREADED_TEST(CallAsFunction) {
Local<v8::Value> a7 =
ReturnThisStrict->CallAsFunction(v8::Null(isolate), 0, NULL);
CHECK(a7->IsNull());
- Local<v8::Value> a8 =
- ReturnThisStrict->CallAsFunction(v8_num(42), 0, NULL);
+ Local<v8::Value> a8 = ReturnThisStrict->CallAsFunction(v8_num(42), 0, NULL);
CHECK(a8->StrictEquals(v8_num(42)));
Local<v8::Value> a9 =
ReturnThisStrict->CallAsFunction(v8_str("hello"), 0, NULL);
@@ -11912,7 +10175,8 @@ THREADED_TEST(CallableObject) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
+ {
+ Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(call_as_function);
Local<Object> instance = instance_template->NewInstance();
v8::TryCatch try_catch;
@@ -11921,7 +10185,8 @@ THREADED_TEST(CallableObject) {
CHECK(!try_catch.HasCaught());
}
- { Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
+ {
+ Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
Local<Object> instance = instance_template->NewInstance();
v8::TryCatch try_catch;
@@ -11929,7 +10194,8 @@ THREADED_TEST(CallableObject) {
CHECK(!try_catch.HasCaught());
}
- { Local<FunctionTemplate> function_template =
+ {
+ Local<FunctionTemplate> function_template =
FunctionTemplate::New(isolate, call_as_function);
Local<Function> function = function_template->GetFunction();
Local<Object> instance = function;
@@ -11939,7 +10205,8 @@ THREADED_TEST(CallableObject) {
CHECK(!try_catch.HasCaught());
}
- { Local<FunctionTemplate> function_template = FunctionTemplate::New(isolate);
+ {
+ Local<FunctionTemplate> function_template = FunctionTemplate::New(isolate);
Local<Function> function = function_template->GetFunction();
Local<Object> instance = function;
v8::TryCatch try_catch;
@@ -11991,834 +10258,6 @@ THREADED_TEST(HandleIteration) {
}
-static void InterceptorHasOwnPropertyGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
-}
-
-
-THREADED_TEST(InterceptorHasOwnProperty) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
- Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
- instance_templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetter));
- Local<Function> function = fun_templ->GetFunction();
- context->Global()->Set(v8_str("constructor"), function);
- v8::Handle<Value> value = CompileRun(
- "var o = new constructor();"
- "o.hasOwnProperty('ostehaps');");
- CHECK_EQ(false, value->BooleanValue());
- value = CompileRun(
- "o.ostehaps = 42;"
- "o.hasOwnProperty('ostehaps');");
- CHECK_EQ(true, value->BooleanValue());
- value = CompileRun(
- "var p = new constructor();"
- "p.hasOwnProperty('ostehaps');");
- CHECK_EQ(false, value->BooleanValue());
-}
-
-
-static void InterceptorHasOwnPropertyGetterGC(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
-}
-
-
-THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
- Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
- instance_templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetterGC));
- Local<Function> function = fun_templ->GetFunction();
- context->Global()->Set(v8_str("constructor"), function);
- // Let's first make some stuff so we can be sure to get a good GC.
- CompileRun(
- "function makestr(size) {"
- " switch (size) {"
- " case 1: return 'f';"
- " case 2: return 'fo';"
- " case 3: return 'foo';"
- " }"
- " return makestr(size >> 1) + makestr((size + 1) >> 1);"
- "}"
- "var x = makestr(12345);"
- "x = makestr(31415);"
- "x = makestr(23456);");
- v8::Handle<Value> value = CompileRun(
- "var o = new constructor();"
- "o.__proto__ = new String(x);"
- "o.hasOwnProperty('ostehaps');");
- CHECK_EQ(false, value->BooleanValue());
-}
-
-
-static void CheckInterceptorLoadIC(
- v8::GenericNamedPropertyGetterCallback getter, const char* source,
- int expected) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(getter, 0, 0, 0, 0,
- v8_str("data")));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(source);
- CHECK_EQ(expected, value->Int32Value());
-}
-
-
-static void InterceptorLoadICGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- v8::Isolate* isolate = CcTest::isolate();
- CHECK_EQ(isolate, info.GetIsolate());
- CHECK_EQ(v8_str("data"), info.Data());
- CHECK_EQ(v8_str("x"), name);
- info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
-}
-
-
-// This test should hit the load IC for the interceptor case.
-THREADED_TEST(InterceptorLoadIC) {
- CheckInterceptorLoadIC(InterceptorLoadICGetter,
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.x;"
- "}",
- 42);
-}
-
-
-// Below go several tests which verify that JITing for various
-// configurations of interceptor and explicit fields works fine
-// (those cases are special cased to get better performance).
-
-static void InterceptorLoadXICGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- info.GetReturnValue().Set(
- v8_str("x")->Equals(name) ?
- v8::Handle<v8::Value>(v8::Integer::New(info.GetIsolate(), 42)) :
- v8::Handle<v8::Value>());
-}
-
-
-THREADED_TEST(InterceptorLoadICWithFieldOnHolder) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "var result = 0;"
- "o.y = 239;"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.y;"
- "}",
- 239);
-}
-
-
-THREADED_TEST(InterceptorLoadICWithSubstitutedProto) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "var result = 0;"
- "o.__proto__ = { 'y': 239 };"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.y + o.x;"
- "}",
- 239 + 42);
-}
-
-
-THREADED_TEST(InterceptorLoadICWithPropertyOnProto) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "var result = 0;"
- "o.__proto__.y = 239;"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.y + o.x;"
- "}",
- 239 + 42);
-}
-
-
-THREADED_TEST(InterceptorLoadICUndefined) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result = (o.y == undefined) ? 239 : 42;"
- "}",
- 239);
-}
-
-
-THREADED_TEST(InterceptorLoadICWithOverride) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "fst = new Object(); fst.__proto__ = o;"
- "snd = new Object(); snd.__proto__ = fst;"
- "var result1 = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result1 = snd.x;"
- "}"
- "fst.x = 239;"
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result = snd.x;"
- "}"
- "result + result1",
- 239 + 42);
-}
-
-
-// Test the case when we stored field into
-// a stub, but interceptor produced value on its own.
-THREADED_TEST(InterceptorLoadICFieldNotNeeded) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "proto = new Object();"
- "o.__proto__ = proto;"
- "proto.x = 239;"
- "for (var i = 0; i < 1000; i++) {"
- " o.x;"
- // Now it should be ICed and keep a reference to x defined on proto
- "}"
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result += o.x;"
- "}"
- "result;",
- 42 * 1000);
-}
-
-
-// Test the case when we stored field into
-// a stub, but it got invalidated later on.
-THREADED_TEST(InterceptorLoadICInvalidatedField) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "proto1 = new Object();"
- "proto2 = new Object();"
- "o.__proto__ = proto1;"
- "proto1.__proto__ = proto2;"
- "proto2.y = 239;"
- "for (var i = 0; i < 1000; i++) {"
- " o.y;"
- // Now it should be ICed and keep a reference to y defined on proto2
- "}"
- "proto1.y = 42;"
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result += o.y;"
- "}"
- "result;",
- 42 * 1000);
-}
-
-
-static int interceptor_load_not_handled_calls = 0;
-static void InterceptorLoadNotHandled(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ++interceptor_load_not_handled_calls;
-}
-
-
-// Test how post-interceptor lookups are done in the non-cacheable
-// case: the interceptor should not be invoked during this lookup.
-THREADED_TEST(InterceptorLoadICPostInterceptor) {
- interceptor_load_not_handled_calls = 0;
- CheckInterceptorLoadIC(InterceptorLoadNotHandled,
- "receiver = new Object();"
- "receiver.__proto__ = o;"
- "proto = new Object();"
- "/* Make proto a slow-case object. */"
- "for (var i = 0; i < 1000; i++) {"
- " proto[\"xxxxxxxx\" + i] = [];"
- "}"
- "proto.x = 17;"
- "o.__proto__ = proto;"
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result += receiver.x;"
- "}"
- "result;",
- 17 * 1000);
- CHECK_EQ(1000, interceptor_load_not_handled_calls);
-}
-
-
-// Test the case when we stored field into
-// a stub, but it got invalidated later on due to override on
-// global object which is between interceptor and fields' holders.
-THREADED_TEST(InterceptorLoadICInvalidatedFieldViaGlobal) {
- CheckInterceptorLoadIC(InterceptorLoadXICGetter,
- "o.__proto__ = this;" // set a global to be a proto of o.
- "this.__proto__.y = 239;"
- "for (var i = 0; i < 10; i++) {"
- " if (o.y != 239) throw 'oops: ' + o.y;"
- // Now it should be ICed and keep a reference to y defined on field_holder.
- "}"
- "this.y = 42;" // Assign on a global.
- "var result = 0;"
- "for (var i = 0; i < 10; i++) {"
- " result += o.y;"
- "}"
- "result;",
- 42 * 10);
-}
-
-
-static void SetOnThis(Local<String> name,
- Local<Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- Local<Object>::Cast(info.This())->ForceSet(name, value);
-}
-
-
-THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- templ->SetAccessor(v8_str("y"), Return239Callback);
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
-
- // Check the case when receiver and interceptor's holder
- // are the same objects.
- v8::Handle<Value> value = CompileRun(
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result = o.y;"
- "}");
- CHECK_EQ(239, value->Int32Value());
-
- // Check the case when interceptor's holder is in proto chain
- // of receiver.
- value = CompileRun(
- "r = { __proto__: o };"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result = r.y;"
- "}");
- CHECK_EQ(239, value->Int32Value());
-}
-
-
-THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
- templ_p->SetAccessor(v8_str("y"), Return239Callback);
-
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
-
- // Check the case when receiver and interceptor's holder
- // are the same objects.
- v8::Handle<Value> value = CompileRun(
- "o.__proto__ = p;"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result = o.x + o.y;"
- "}");
- CHECK_EQ(239 + 42, value->Int32Value());
-
- // Check the case when interceptor's holder is in proto chain
- // of receiver.
- value = CompileRun(
- "r = { __proto__: o };"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result = r.x + r.y;"
- "}");
- CHECK_EQ(239 + 42, value->Int32Value());
-}
-
-
-THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- templ->SetAccessor(v8_str("y"), Return239Callback);
-
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
- "fst = new Object(); fst.__proto__ = o;"
- "snd = new Object(); snd.__proto__ = fst;"
- "var result1 = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result1 = snd.x;"
- "}"
- "fst.x = 239;"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result = snd.x;"
- "}"
- "result + result1");
- CHECK_EQ(239 + 42, value->Int32Value());
-}
-
-
-// Test the case when we stored callback into
-// a stub, but interceptor produced value on its own.
-THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
- templ_p->SetAccessor(v8_str("y"), Return239Callback);
-
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
- "o.__proto__ = p;"
- "for (var i = 0; i < 7; i++) {"
- " o.x;"
- // Now it should be ICed and keep a reference to x defined on p
- "}"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result += o.x;"
- "}"
- "result");
- CHECK_EQ(42 * 7, value->Int32Value());
-}
-
-
-// Test the case when we stored callback into
-// a stub, but it got invalidated later on.
-THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
- templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
-
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
- "inbetween = new Object();"
- "o.__proto__ = inbetween;"
- "inbetween.__proto__ = p;"
- "for (var i = 0; i < 10; i++) {"
- " o.y;"
- // Now it should be ICed and keep a reference to y defined on p
- "}"
- "inbetween.y = 42;"
- "var result = 0;"
- "for (var i = 0; i < 10; i++) {"
- " result += o.y;"
- "}"
- "result");
- CHECK_EQ(42 * 10, value->Int32Value());
-}
-
-
-// Test the case when we stored callback into
-// a stub, but it got invalidated later on due to override on
-// global object which is between interceptor and callbacks' holders.
-THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
- templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
-
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
- "o.__proto__ = this;"
- "this.__proto__ = p;"
- "for (var i = 0; i < 10; i++) {"
- " if (o.y != 239) throw 'oops: ' + o.y;"
- // Now it should be ICed and keep a reference to y defined on p
- "}"
- "this.y = 42;"
- "var result = 0;"
- "for (var i = 0; i < 10; i++) {"
- " result += o.y;"
- "}"
- "result");
- CHECK_EQ(42 * 10, value->Int32Value());
-}
-
-
-static void InterceptorLoadICGetter0(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- CHECK(v8_str("x")->Equals(name));
- info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), 0));
-}
-
-
-THREADED_TEST(InterceptorReturningZero) {
- CheckInterceptorLoadIC(InterceptorLoadICGetter0,
- "o.x == undefined ? 1 : 0",
- 0);
-}
-
-
-static void InterceptorStoreICSetter(
- Local<Name> key, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(v8_str("x")->Equals(key));
- CHECK_EQ(42, value->Int32Value());
- info.GetReturnValue().Set(value);
-}
-
-
-// This test should hit the store IC for the interceptor case.
-THREADED_TEST(InterceptorStoreIC) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- InterceptorLoadICGetter, InterceptorStoreICSetter, 0, 0, 0,
- v8_str("data")));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- CompileRun(
- "for (var i = 0; i < 1000; i++) {"
- " o.x = 42;"
- "}");
-}
-
-
-THREADED_TEST(InterceptorStoreICWithNoSetter) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
- "for (var i = 0; i < 1000; i++) {"
- " o.y = 239;"
- "}"
- "42 + o.y");
- CHECK_EQ(239 + 42, value->Int32Value());
-}
-
-
-
-
-v8::Handle<Value> call_ic_function;
-v8::Handle<Value> call_ic_function2;
-v8::Handle<Value> call_ic_function3;
-
-static void InterceptorCallICGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- CHECK(v8_str("x")->Equals(name));
- info.GetReturnValue().Set(call_ic_function);
-}
-
-
-// This test should hit the call IC for the interceptor case.
-THREADED_TEST(InterceptorCallIC) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function =
- v8_compile("function f(x) { return x + 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.x(41);"
- "}");
- CHECK_EQ(42, value->Int32Value());
-}
-
-
-// This test checks that if interceptor doesn't provide
-// a value, we can fetch regular value.
-THREADED_TEST(InterceptorCallICSeesOthers) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
- "o.x = function f(x) { return x + 1; };"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result = o.x(41);"
- "}");
- CHECK_EQ(42, value->Int32Value());
-}
-
-
-static v8::Handle<Value> call_ic_function4;
-static void InterceptorCallICGetter4(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- CHECK(v8_str("x")->Equals(name));
- info.GetReturnValue().Set(call_ic_function4);
-}
-
-
-// This test checks that if interceptor provides a function,
-// even if we cached shadowed variant, interceptor's function
-// is invoked
-THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter4));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function4 =
- v8_compile("function f(x) { return x - 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
- "Object.getPrototypeOf(o).x = function(x) { return x + 1; };"
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.x(42);"
- "}");
- CHECK_EQ(41, value->Int32Value());
-}
-
-
-// Test the case when we stored cacheable lookup into
-// a stub, but it got invalidated later on
-THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
- "proto1 = new Object();"
- "proto2 = new Object();"
- "o.__proto__ = proto1;"
- "proto1.__proto__ = proto2;"
- "proto2.y = function(x) { return x + 1; };"
- // Invoke it many times to compile a stub
- "for (var i = 0; i < 7; i++) {"
- " o.y(42);"
- "}"
- "proto1.y = function(x) { return x - 1; };"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result += o.y(42);"
- "}");
- CHECK_EQ(41 * 7, value->Int32Value());
-}
-
-
-// This test checks that if interceptor doesn't provide a function,
-// cached constant function is used
-THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
- "function inc(x) { return x + 1; };"
- "inc(1);"
- "o.x = inc;"
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.x(42);"
- "}");
- CHECK_EQ(43, value->Int32Value());
-}
-
-
-static v8::Handle<Value> call_ic_function5;
-static void InterceptorCallICGetter5(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name))
- info.GetReturnValue().Set(call_ic_function5);
-}
-
-
-// This test checks that if interceptor provides a function,
-// even if we cached constant function, interceptor's function
-// is invoked
-THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter5));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function5 =
- v8_compile("function f(x) { return x - 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
- "function inc(x) { return x + 1; };"
- "inc(1);"
- "o.x = inc;"
- "var result = 0;"
- "for (var i = 0; i < 1000; i++) {"
- " result = o.x(42);"
- "}");
- CHECK_EQ(41, value->Int32Value());
-}
-
-
-static v8::Handle<Value> call_ic_function6;
-static void InterceptorCallICGetter6(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name))
- info.GetReturnValue().Set(call_ic_function6);
-}
-
-
-// Same test as above, except the code is wrapped in a function
-// to test the optimized compiler.
-THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter6));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function6 =
- v8_compile("function f(x) { return x - 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
- "function inc(x) { return x + 1; };"
- "inc(1);"
- "o.x = inc;"
- "function test() {"
- " var result = 0;"
- " for (var i = 0; i < 1000; i++) {"
- " result = o.x(42);"
- " }"
- " return result;"
- "};"
- "test();"
- "test();"
- "test();"
- "%OptimizeFunctionOnNextCall(test);"
- "test()");
- CHECK_EQ(41, value->Int32Value());
-}
-
-
-// Test the case when we stored constant function into
-// a stub, but it got invalidated later on
-THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
- "function inc(x) { return x + 1; };"
- "inc(1);"
- "proto1 = new Object();"
- "proto2 = new Object();"
- "o.__proto__ = proto1;"
- "proto1.__proto__ = proto2;"
- "proto2.y = inc;"
- // Invoke it many times to compile a stub
- "for (var i = 0; i < 7; i++) {"
- " o.y(42);"
- "}"
- "proto1.y = function(x) { return x - 1; };"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result += o.y(42);"
- "}");
- CHECK_EQ(41 * 7, value->Int32Value());
-}
-
-
-// Test the case when we stored constant function into
-// a stub, but it got invalidated later on due to override on
-// global object which is between interceptor and constant function' holders.
-THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
- "function inc(x) { return x + 1; };"
- "inc(1);"
- "o.__proto__ = this;"
- "this.__proto__.y = inc;"
- // Invoke it many times to compile a stub
- "for (var i = 0; i < 7; i++) {"
- " if (o.y(42) != 43) throw 'oops: ' + o.y(42);"
- "}"
- "this.y = function(x) { return x - 1; };"
- "var result = 0;"
- "for (var i = 0; i < 7; i++) {"
- " result += o.y(42);"
- "}");
- CHECK_EQ(41 * 7, value->Int32Value());
-}
-
-
-// Test the case when actual function to call sits on global object.
-THREADED_TEST(InterceptorCallICCachedFromGlobal) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
-
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
- "try {"
- " o.__proto__ = this;"
- " for (var i = 0; i < 10; i++) {"
- " var v = o.parseFloat('239');"
- " if (v != 239) throw v;"
- // Now it should be ICed and keep a reference to parseFloat.
- " }"
- " var result = 0;"
- " for (var i = 0; i < 10; i++) {"
- " result += o.parseFloat('239');"
- " }"
- " result"
- "} catch(e) {"
- " e"
- "};");
- CHECK_EQ(239 * 10, value->Int32Value());
-}
-
static void InterceptorCallICFastApi(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
@@ -12837,7 +10276,7 @@ static void FastApiCallback_TrivialSignature(
CheckReturnValue(args, FUNCTION_ADDR(FastApiCallback_TrivialSignature));
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, args.GetIsolate());
- CHECK_EQ(args.This(), args.Holder());
+ CHECK(args.This()->Equals(args.Holder()));
CHECK(args.Data()->Equals(v8_str("method_data")));
args.GetReturnValue().Set(args[0]->Int32Value() + 1);
}
@@ -12848,7 +10287,7 @@ static void FastApiCallback_SimpleSignature(
CheckReturnValue(args, FUNCTION_ADDR(FastApiCallback_SimpleSignature));
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, args.GetIsolate());
- CHECK_EQ(args.This()->GetPrototype(), args.Holder());
+ CHECK(args.This()->GetPrototype()->Equals(args.Holder()));
CHECK(args.Data()->Equals(v8_str("method_data")));
// Note, we're using HasRealNamedProperty instead of Has to avoid
// invoking the interceptor again.
@@ -12926,18 +10365,22 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
" }"
"}"
"f(); result;");
- CHECK_EQ(v8_str("ggggg"), result);
+ CHECK(v8_str("ggggg")->Equals(result));
}
+static int p_getter_count_3;
+
+
static Handle<Value> DoDirectGetter() {
- if (++p_getter_count % 3 == 0) {
+ if (++p_getter_count_3 % 3 == 0) {
CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
GenerateSomeGarbage();
}
return v8_str("Direct Getter Result");
}
+
static void DirectGetterCallback(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -12954,15 +10397,15 @@ static void LoadICFastApi_DirectCall_GCMoveStub(Accessor accessor) {
v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("p1"), accessor);
context->Global()->Set(v8_str("o1"), obj->NewInstance());
- p_getter_count = 0;
+ p_getter_count_3 = 0;
v8::Handle<v8::Value> result = CompileRun(
"function f() {"
" for (var i = 0; i < 30; i++) o1.p1;"
" return o1.p1"
"}"
"f();");
- CHECK_EQ(v8_str("Direct Getter Result"), result);
- CHECK_EQ(31, p_getter_count);
+ CHECK(v8_str("Direct Getter Result")->Equals(result));
+ CHECK_EQ(31, p_getter_count_3);
}
@@ -12991,7 +10434,7 @@ THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
" try { o1.p1; } catch (e) { result += e; }"
"}"
"result;");
- CHECK_EQ(v8_str("ggggg"), result);
+ CHECK(v8_str("ggggg")->Equals(result));
}
@@ -13173,8 +10616,8 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
"}");
CHECK(try_catch.HasCaught());
// TODO(verwaest): Adjust message.
- CHECK_EQ(v8_str("TypeError: undefined is not a function"),
- try_catch.Exception()->ToString(isolate));
+ CHECK(v8_str("TypeError: receiver.method is not a function")
+ ->Equals(try_catch.Exception()->ToString(isolate)));
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
CHECK_GE(interceptor_call_count, 50);
}
@@ -13215,8 +10658,8 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
" }"
"}");
CHECK(try_catch.HasCaught());
- CHECK_EQ(v8_str("TypeError: Illegal invocation"),
- try_catch.Exception()->ToString(isolate));
+ CHECK(v8_str("TypeError: Illegal invocation")
+ ->Equals(try_catch.Exception()->ToString(isolate)));
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
CHECK_GE(interceptor_call_count, 50);
}
@@ -13348,8 +10791,8 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
"}");
CHECK(try_catch.HasCaught());
// TODO(verwaest): Adjust message.
- CHECK_EQ(v8_str("TypeError: undefined is not a function"),
- try_catch.Exception()->ToString(isolate));
+ CHECK(v8_str("TypeError: receiver.method is not a function")
+ ->Equals(try_catch.Exception()->ToString(isolate)));
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
@@ -13386,350 +10829,12 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
" }"
"}");
CHECK(try_catch.HasCaught());
- CHECK_EQ(v8_str("TypeError: Illegal invocation"),
- try_catch.Exception()->ToString(isolate));
+ CHECK(v8_str("TypeError: Illegal invocation")
+ ->Equals(try_catch.Exception()->ToString(isolate)));
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
-v8::Handle<Value> keyed_call_ic_function;
-
-static void InterceptorKeyedCallICGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name)) {
- info.GetReturnValue().Set(keyed_call_ic_function);
- }
-}
-
-
-// Test the case when we stored cacheable lookup into
-// a stub, but the function name changed (to another cacheable function).
-THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- CompileRun(
- "proto = new Object();"
- "proto.y = function(x) { return x + 1; };"
- "proto.z = function(x) { return x - 1; };"
- "o.__proto__ = proto;"
- "var result = 0;"
- "var method = 'y';"
- "for (var i = 0; i < 10; i++) {"
- " if (i == 5) { method = 'z'; };"
- " result += o[method](41);"
- "}");
- CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
-}
-
-
-// Test the case when we stored cacheable lookup into
-// a stub, but the function name changed (and the new function is present
-// both before and after the interceptor in the prototype chain).
-THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorKeyedCallICGetter));
- LocalContext context;
- context->Global()->Set(v8_str("proto1"), templ->NewInstance());
- keyed_call_ic_function =
- v8_compile("function f(x) { return x - 1; }; f")->Run();
- CompileRun(
- "o = new Object();"
- "proto2 = new Object();"
- "o.y = function(x) { return x + 1; };"
- "proto2.y = function(x) { return x + 2; };"
- "o.__proto__ = proto1;"
- "proto1.__proto__ = proto2;"
- "var result = 0;"
- "var method = 'x';"
- "for (var i = 0; i < 10; i++) {"
- " if (i == 5) { method = 'y'; };"
- " result += o[method](41);"
- "}");
- CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
-}
-
-
-// Same as InterceptorKeyedCallICKeyChange1 only the cacheable function sit
-// on the global object.
-THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- CompileRun(
- "function inc(x) { return x + 1; };"
- "inc(1);"
- "function dec(x) { return x - 1; };"
- "dec(1);"
- "o.__proto__ = this;"
- "this.__proto__.x = inc;"
- "this.__proto__.y = dec;"
- "var result = 0;"
- "var method = 'x';"
- "for (var i = 0; i < 10; i++) {"
- " if (i == 5) { method = 'y'; };"
- " result += o[method](41);"
- "}");
- CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
-}
-
-
-// Test the case when actual function to call sits on global object.
-THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
-
- CompileRun(
- "function len(x) { return x.length; };"
- "o.__proto__ = this;"
- "var m = 'parseFloat';"
- "var result = 0;"
- "for (var i = 0; i < 10; i++) {"
- " if (i == 5) {"
- " m = 'len';"
- " saved_result = result;"
- " };"
- " result = o[m]('239');"
- "}");
- CHECK_EQ(3, context->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
-}
-
-
-// Test the map transition before the interceptor.
-THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
-
- CompileRun(
- "var o = new Object();"
- "o.__proto__ = proto;"
- "o.method = function(x) { return x + 1; };"
- "var m = 'method';"
- "var result = 0;"
- "for (var i = 0; i < 10; i++) {"
- " if (i == 5) { o.method = function(x) { return x - 1; }; };"
- " result += o[m](41);"
- "}");
- CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
-}
-
-
-// Test the map transition after the interceptor.
-THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
- templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
- LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
-
- CompileRun(
- "var proto = new Object();"
- "o.__proto__ = proto;"
- "proto.method = function(x) { return x + 1; };"
- "var m = 'method';"
- "var result = 0;"
- "for (var i = 0; i < 10; i++) {"
- " if (i == 5) { proto.method = function(x) { return x - 1; }; };"
- " result += o[m](41);"
- "}");
- CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
-}
-
-
-static int interceptor_call_count = 0;
-
-static void InterceptorICRefErrorGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name) && interceptor_call_count++ < 20) {
- info.GetReturnValue().Set(call_ic_function2);
- }
-}
-
-
-// This test should hit load and call ICs for the interceptor case.
-// Once in a while, the interceptor will reply that a property was not
-// found in which case we should get a reference error.
-THREADED_TEST(InterceptorICReferenceErrors) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorICRefErrorGetter));
- LocalContext context(0, templ, v8::Handle<Value>());
- call_ic_function2 = v8_compile("function h(x) { return x; }; h")->Run();
- v8::Handle<Value> value = CompileRun(
- "function f() {"
- " for (var i = 0; i < 1000; i++) {"
- " try { x; } catch(e) { return true; }"
- " }"
- " return false;"
- "};"
- "f();");
- CHECK_EQ(true, value->BooleanValue());
- interceptor_call_count = 0;
- value = CompileRun(
- "function g() {"
- " for (var i = 0; i < 1000; i++) {"
- " try { x(42); } catch(e) { return true; }"
- " }"
- " return false;"
- "};"
- "g();");
- CHECK_EQ(true, value->BooleanValue());
-}
-
-
-static int interceptor_ic_exception_get_count = 0;
-
-static void InterceptorICExceptionGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name) && ++interceptor_ic_exception_get_count < 20) {
- info.GetReturnValue().Set(call_ic_function3);
- }
- if (interceptor_ic_exception_get_count == 20) {
- info.GetIsolate()->ThrowException(v8_num(42));
- return;
- }
-}
-
-
-// Test interceptor load/call IC where the interceptor throws an
-// exception once in a while.
-THREADED_TEST(InterceptorICGetterExceptions) {
- interceptor_ic_exception_get_count = 0;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorICExceptionGetter));
- LocalContext context(0, templ, v8::Handle<Value>());
- call_ic_function3 = v8_compile("function h(x) { return x; }; h")->Run();
- v8::Handle<Value> value = CompileRun(
- "function f() {"
- " for (var i = 0; i < 100; i++) {"
- " try { x; } catch(e) { return true; }"
- " }"
- " return false;"
- "};"
- "f();");
- CHECK_EQ(true, value->BooleanValue());
- interceptor_ic_exception_get_count = 0;
- value = CompileRun(
- "function f() {"
- " for (var i = 0; i < 100; i++) {"
- " try { x(42); } catch(e) { return true; }"
- " }"
- " return false;"
- "};"
- "f();");
- CHECK_EQ(true, value->BooleanValue());
-}
-
-
-static int interceptor_ic_exception_set_count = 0;
-
-static void InterceptorICExceptionSetter(
- Local<Name> key, Local<Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- if (++interceptor_ic_exception_set_count > 20) {
- info.GetIsolate()->ThrowException(v8_num(42));
- }
-}
-
-
-// Test interceptor store IC where the interceptor throws an exception
-// once in a while.
-THREADED_TEST(InterceptorICSetterExceptions) {
- interceptor_ic_exception_set_count = 0;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(0, InterceptorICExceptionSetter));
- LocalContext context(0, templ, v8::Handle<Value>());
- v8::Handle<Value> value = CompileRun(
- "function f() {"
- " for (var i = 0; i < 100; i++) {"
- " try { x = 42; } catch(e) { return true; }"
- " }"
- " return false;"
- "};"
- "f();");
- CHECK_EQ(true, value->BooleanValue());
-}
-
-
-// Test that we ignore null interceptors.
-THREADED_TEST(NullNamedInterceptor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- static_cast<v8::GenericNamedPropertyGetterCallback>(0)));
- LocalContext context;
- templ->Set(CcTest::isolate(), "x", v8_num(42));
- v8::Handle<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
- v8::Handle<Value> value = CompileRun("obj.x");
- CHECK(value->IsInt32());
- CHECK_EQ(42, value->Int32Value());
-}
-
-
-// Test that we ignore null interceptors.
-THREADED_TEST(NullIndexedInterceptor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
- static_cast<v8::IndexedPropertyGetterCallback>(0)));
- LocalContext context;
- templ->Set(CcTest::isolate(), "42", v8_num(42));
- v8::Handle<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
- v8::Handle<Value> value = CompileRun("obj[42]");
- CHECK(value->IsInt32());
- CHECK_EQ(42, value->Int32Value());
-}
-
-
-THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
- templ->InstanceTemplate()->SetHandler(
- v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- LocalContext env;
- env->Global()->Set(v8_str("obj"),
- templ->GetFunction()->NewInstance());
- ExpectTrue("obj.x === 42");
- ExpectTrue("!obj.propertyIsEnumerable('x')");
-}
-
-
static void ThrowingGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
@@ -14041,19 +11146,58 @@ TEST(ObjectProtoToStringES6) {
} while (0)
TEST_TOSTRINGTAG(Array, Object, Object);
- TEST_TOSTRINGTAG(Object, Arguments, ~Arguments);
- TEST_TOSTRINGTAG(Object, Array, ~Array);
- TEST_TOSTRINGTAG(Object, Boolean, ~Boolean);
- TEST_TOSTRINGTAG(Object, Date, ~Date);
- TEST_TOSTRINGTAG(Object, Error, ~Error);
- TEST_TOSTRINGTAG(Object, Function, ~Function);
- TEST_TOSTRINGTAG(Object, Number, ~Number);
- TEST_TOSTRINGTAG(Object, RegExp, ~RegExp);
- TEST_TOSTRINGTAG(Object, String, ~String);
+ TEST_TOSTRINGTAG(Object, Arguments, Arguments);
+ TEST_TOSTRINGTAG(Object, Array, Array);
+ TEST_TOSTRINGTAG(Object, Boolean, Boolean);
+ TEST_TOSTRINGTAG(Object, Date, Date);
+ TEST_TOSTRINGTAG(Object, Error, Error);
+ TEST_TOSTRINGTAG(Object, Function, Function);
+ TEST_TOSTRINGTAG(Object, Number, Number);
+ TEST_TOSTRINGTAG(Object, RegExp, RegExp);
+ TEST_TOSTRINGTAG(Object, String, String);
TEST_TOSTRINGTAG(Object, Foo, Foo);
#undef TEST_TOSTRINGTAG
+ Local<v8::RegExp> valueRegExp = v8::RegExp::New(v8_str("^$"),
+ v8::RegExp::kNone);
+ Local<Value> valueNumber = v8_num(123);
+ Local<v8::Symbol> valueSymbol = v8_symbol("TestSymbol");
+ Local<v8::Function> valueFunction =
+ CompileRun("(function fn() {})").As<v8::Function>();
+ Local<v8::Object> valueObject = v8::Object::New(v8::Isolate::GetCurrent());
+ Local<v8::Primitive> valueNull = v8::Null(v8::Isolate::GetCurrent());
+ Local<v8::Primitive> valueUndef = v8::Undefined(v8::Isolate::GetCurrent());
+
+#define TEST_TOSTRINGTAG(type, tagValue, expected) \
+ do { \
+ object = CompileRun("new " #type "()"); \
+ object.As<v8::Object>()->Set(toStringTag, tagValue); \
+ value = object.As<v8::Object>()->ObjectProtoToString(); \
+ CHECK(value->IsString() && \
+ value->Equals(v8_str("[object " #expected "]"))); \
+ } while (0)
+
+#define TEST_TOSTRINGTAG_TYPES(tagValue) \
+ TEST_TOSTRINGTAG(Array, tagValue, Array); \
+ TEST_TOSTRINGTAG(Object, tagValue, Object); \
+ TEST_TOSTRINGTAG(Function, tagValue, Function); \
+ TEST_TOSTRINGTAG(Date, tagValue, Date); \
+ TEST_TOSTRINGTAG(RegExp, tagValue, RegExp); \
+ TEST_TOSTRINGTAG(Error, tagValue, Error); \
+
+ // Test non-String-valued @@toStringTag
+ TEST_TOSTRINGTAG_TYPES(valueRegExp);
+ TEST_TOSTRINGTAG_TYPES(valueNumber);
+ TEST_TOSTRINGTAG_TYPES(valueSymbol);
+ TEST_TOSTRINGTAG_TYPES(valueFunction);
+ TEST_TOSTRINGTAG_TYPES(valueObject);
+ TEST_TOSTRINGTAG_TYPES(valueNull);
+ TEST_TOSTRINGTAG_TYPES(valueUndef);
+
+#undef TEST_TOSTRINGTAG
+#undef TEST_TOSTRINGTAG_TYPES
+
// @@toStringTag getter throws
Local<Value> obj = v8::Object::New(isolate);
obj.As<v8::Object>()->SetAccessor(toStringTag, ThrowingSymbolAccessorGetter);
@@ -15167,14 +12311,14 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
i::FLAG_incremental_marking = false;
if (i::FLAG_never_compact) return;
const char* script =
- "function bar() {"
- " var sum = 0;"
- " for (i = 0; i < 100; ++i)"
- " sum = foo(i);"
- " return sum;"
- "}"
- "function foo(i) { return i * i; };"
- "bar();";
+ "function bar() {"
+ " var sum = 0;"
+ " for (i = 0; i < 10; ++i)"
+ " sum = foo(i);"
+ " return sum;"
+ "}"
+ "function foo(i) { return i; };"
+ "bar();";
// Run this test in a new isolate to make sure we don't
// have remnants of state from other code.
@@ -15183,6 +12327,9 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Heap* heap = i_isolate->heap();
+ // Start with a clean slate.
+ heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler_Prepare");
+
{
v8::HandleScope scope(isolate);
i::HashMap code(MatchPointers);
@@ -15216,7 +12363,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
}
// Force code movement.
- heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
+ heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler_Move");
isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
@@ -15257,7 +12404,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
// notifications, we could compare two collections, one created by listening
// from the time of creation of an isolate, and the other by subscribing
// with EnumExisting.
- CHECK_LT(0, code.occupancy());
+ CHECK_LT(0u, code.occupancy());
code_map = NULL;
}
@@ -15278,6 +12425,12 @@ THREADED_TEST(ExternalAllocatedMemory) {
isolate->AdjustAmountOfExternalAllocatedMemory(kSize));
CHECK_EQ(baseline,
isolate->AdjustAmountOfExternalAllocatedMemory(-kSize));
+ const int64_t kTriggerGCSize =
+ v8::internal::Internals::kExternalAllocationLimit + 1;
+ CHECK_EQ(baseline + kTriggerGCSize,
+ isolate->AdjustAmountOfExternalAllocatedMemory(kTriggerGCSize));
+ CHECK_EQ(baseline,
+ isolate->AdjustAmountOfExternalAllocatedMemory(-kTriggerGCSize));
}
@@ -15333,9 +12486,9 @@ static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
CHECK_EQ(2, message->GetStartColumn());
CHECK_EQ(3, message->GetEndColumn());
v8::String::Utf8Value line(message->GetSourceLine());
- CHECK_EQ(" throw 'nirk';", *line);
+ CHECK_EQ(0, strcmp(" throw 'nirk';", *line));
v8::String::Utf8Value name(message->GetScriptOrigin().ResourceName());
- CHECK_EQ(resource_name, *name);
+ CHECK_EQ(0, strcmp(resource_name, *name));
}
@@ -15413,7 +12566,7 @@ THREADED_TEST(CallbackFunctionName) {
v8::Handle<v8::Value> value = CompileRun("obj.asdf.name");
CHECK(value->IsString());
v8::String::Utf8Value name(value);
- CHECK_EQ("asdf", *name);
+ CHECK_EQ(0, strcmp("asdf", *name));
}
@@ -15427,30 +12580,26 @@ THREADED_TEST(DateAccess) {
}
-void CheckProperties(v8::Isolate* isolate,
- v8::Handle<v8::Value> val,
- int elmc,
- const char* elmv[]) {
+void CheckProperties(v8::Isolate* isolate, v8::Handle<v8::Value> val,
+ unsigned elmc, const char* elmv[]) {
v8::Handle<v8::Object> obj = val.As<v8::Object>();
v8::Handle<v8::Array> props = obj->GetPropertyNames();
CHECK_EQ(elmc, props->Length());
- for (int i = 0; i < elmc; i++) {
+ for (unsigned i = 0; i < elmc; i++) {
v8::String::Utf8Value elm(props->Get(v8::Integer::New(isolate, i)));
- CHECK_EQ(elmv[i], *elm);
+ CHECK_EQ(0, strcmp(elmv[i], *elm));
}
}
-void CheckOwnProperties(v8::Isolate* isolate,
- v8::Handle<v8::Value> val,
- int elmc,
- const char* elmv[]) {
+void CheckOwnProperties(v8::Isolate* isolate, v8::Handle<v8::Value> val,
+ unsigned elmc, const char* elmv[]) {
v8::Handle<v8::Object> obj = val.As<v8::Object>();
v8::Handle<v8::Array> props = obj->GetOwnPropertyNames();
CHECK_EQ(elmc, props->Length());
- for (int i = 0; i < elmc; i++) {
+ for (unsigned i = 0; i < elmc; i++) {
v8::String::Utf8Value elm(props->Get(v8::Integer::New(isolate, i)));
- CHECK_EQ(elmv[i], *elm);
+ CHECK_EQ(0, strcmp(elmv[i], *elm));
}
}
@@ -15469,7 +12618,7 @@ THREADED_TEST(PropertyEnumeration) {
"result[3] = x;"
"result;");
v8::Handle<v8::Array> elms = obj.As<v8::Array>();
- CHECK_EQ(4, elms->Length());
+ CHECK_EQ(4u, elms->Length());
int elmc0 = 0;
const char** elmv0 = NULL;
CheckProperties(
@@ -15513,7 +12662,7 @@ THREADED_TEST(PropertyEnumeration2) {
"result[3] = x;"
"result;");
v8::Handle<v8::Array> elms = obj.As<v8::Array>();
- CHECK_EQ(4, elms->Length());
+ CHECK_EQ(4u, elms->Length());
int elmc0 = 0;
const char** elmv0 = NULL;
CheckProperties(isolate,
@@ -15521,7 +12670,7 @@ THREADED_TEST(PropertyEnumeration2) {
v8::Handle<v8::Value> val = elms->Get(v8::Integer::New(isolate, 0));
v8::Handle<v8::Array> props = val.As<v8::Object>()->GetPropertyNames();
- CHECK_EQ(0, props->Length());
+ CHECK_EQ(0u, props->Length());
for (uint32_t i = 0; i < props->Length(); i++) {
printf("p[%u]\n", i);
}
@@ -15905,20 +13054,20 @@ TEST(ObjectClone) {
Local<v8::Object> obj = val.As<v8::Object>();
obj->Set(v8_str("gamma"), v8_str("cloneme"));
- CHECK_EQ(v8_str("hello"), obj->Get(v8_str("alpha")));
- CHECK_EQ(v8::Integer::New(isolate, 123), obj->Get(v8_str("beta")));
- CHECK_EQ(v8_str("cloneme"), obj->Get(v8_str("gamma")));
+ CHECK(v8_str("hello")->Equals(obj->Get(v8_str("alpha"))));
+ CHECK(v8::Integer::New(isolate, 123)->Equals(obj->Get(v8_str("beta"))));
+ CHECK(v8_str("cloneme")->Equals(obj->Get(v8_str("gamma"))));
// Clone it.
Local<v8::Object> clone = obj->Clone();
- CHECK_EQ(v8_str("hello"), clone->Get(v8_str("alpha")));
- CHECK_EQ(v8::Integer::New(isolate, 123), clone->Get(v8_str("beta")));
- CHECK_EQ(v8_str("cloneme"), clone->Get(v8_str("gamma")));
+ CHECK(v8_str("hello")->Equals(clone->Get(v8_str("alpha"))));
+ CHECK(v8::Integer::New(isolate, 123)->Equals(clone->Get(v8_str("beta"))));
+ CHECK(v8_str("cloneme")->Equals(clone->Get(v8_str("gamma"))));
// Set a property on the clone, verify each object.
clone->Set(v8_str("beta"), v8::Integer::New(isolate, 456));
- CHECK_EQ(v8::Integer::New(isolate, 123), obj->Get(v8_str("beta")));
- CHECK_EQ(v8::Integer::New(isolate, 456), clone->Get(v8_str("beta")));
+ CHECK(v8::Integer::New(isolate, 123)->Equals(obj->Get(v8_str("beta"))));
+ CHECK(v8::Integer::New(isolate, 456)->Equals(clone->Get(v8_str("beta"))));
}
@@ -16033,12 +13182,12 @@ THREADED_TEST(MorphCompositeStringTest) {
const char* expected_slice_on_cons =
"ow is the time for all good men to come to the aid of the party"
"Now is the time for all good men to come to the aid of the part";
- CHECK_EQ(String::NewFromUtf8(env->GetIsolate(), expected_cons),
- env->Global()->Get(v8_str("cons")));
- CHECK_EQ(String::NewFromUtf8(env->GetIsolate(), expected_slice),
- env->Global()->Get(v8_str("slice")));
- CHECK_EQ(String::NewFromUtf8(env->GetIsolate(), expected_slice_on_cons),
- env->Global()->Get(v8_str("slice_on_cons")));
+ CHECK(String::NewFromUtf8(env->GetIsolate(), expected_cons)
+ ->Equals(env->Global()->Get(v8_str("cons"))));
+ CHECK(String::NewFromUtf8(env->GetIsolate(), expected_slice)
+ ->Equals(env->Global()->Get(v8_str("slice"))));
+ CHECK(String::NewFromUtf8(env->GetIsolate(), expected_slice_on_cons)
+ ->Equals(env->Global()->Get(v8_str("slice_on_cons"))));
}
i::DeleteArray(two_byte_string);
}
@@ -16161,13 +13310,13 @@ TEST(ReadOnlyPropertyInGlobalProto) {
// Check without 'eval' or 'with'.
v8::Handle<v8::Value> res =
CompileRun("function f() { x = 42; return x; }; f()");
- CHECK_EQ(v8::Integer::New(isolate, 0), res);
+ CHECK(v8::Integer::New(isolate, 0)->Equals(res));
// Check with 'eval'.
res = CompileRun("function f() { eval('1'); y = 43; return y; }; f()");
- CHECK_EQ(v8::Integer::New(isolate, 0), res);
+ CHECK(v8::Integer::New(isolate, 0)->Equals(res));
// Check with 'with'.
res = CompileRun("function f() { with (this) { y = 44 }; return y; }; f()");
- CHECK_EQ(v8::Integer::New(isolate, 0), res);
+ CHECK(v8::Integer::New(isolate, 0)->Equals(res));
}
static int force_set_set_count = 0;
@@ -16299,139 +13448,6 @@ TEST(ForceSetWithInterceptor) {
}
-THREADED_TEST(ForceDelete) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- LocalContext context(NULL, templ);
- v8::Handle<v8::Object> global = context->Global();
-
- // Ordinary properties
- v8::Handle<v8::String> simple_property =
- v8::String::NewFromUtf8(isolate, "p");
- global->ForceSet(simple_property, v8::Int32::New(isolate, 4), v8::DontDelete);
- CHECK_EQ(4, global->Get(simple_property)->Int32Value());
- // This should fail because the property is dont-delete.
- CHECK(!global->Delete(simple_property));
- CHECK_EQ(4, global->Get(simple_property)->Int32Value());
- // This should succeed even though the property is dont-delete.
- CHECK(global->ForceDelete(simple_property));
- CHECK(global->Get(simple_property)->IsUndefined());
-}
-
-
-static int force_delete_interceptor_count = 0;
-static bool pass_on_delete = false;
-
-
-static void ForceDeleteDeleter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Boolean>& info) {
- force_delete_interceptor_count++;
- if (pass_on_delete) return;
- info.GetReturnValue().Set(true);
-}
-
-
-THREADED_TEST(ForceDeleteWithInterceptor) {
- force_delete_interceptor_count = 0;
- pass_on_delete = false;
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- templ->SetHandler(
- v8::NamedPropertyHandlerConfiguration(0, 0, 0, ForceDeleteDeleter));
- LocalContext context(NULL, templ);
- v8::Handle<v8::Object> global = context->Global();
-
- v8::Handle<v8::String> some_property =
- v8::String::NewFromUtf8(isolate, "a");
- global->ForceSet(some_property, v8::Integer::New(isolate, 42),
- v8::DontDelete);
-
- // Deleting a property should get intercepted and nothing should
- // happen.
- CHECK_EQ(0, force_delete_interceptor_count);
- CHECK(global->Delete(some_property));
- CHECK_EQ(1, force_delete_interceptor_count);
- CHECK_EQ(42, global->Get(some_property)->Int32Value());
- // Deleting the property when the interceptor returns an empty
- // handle should not delete the property since it is DontDelete.
- pass_on_delete = true;
- CHECK(!global->Delete(some_property));
- CHECK_EQ(2, force_delete_interceptor_count);
- CHECK_EQ(42, global->Get(some_property)->Int32Value());
- // Forcing the property to be deleted should delete the value
- // without calling the interceptor.
- CHECK(global->ForceDelete(some_property));
- CHECK(global->Get(some_property)->IsUndefined());
- CHECK_EQ(2, force_delete_interceptor_count);
-}
-
-
-// Make sure that forcing a delete invalidates any IC stubs, so we
-// don't read the hole value.
-THREADED_TEST(ForceDeleteIC) {
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- // Create a DontDelete variable on the global object.
- CompileRun("this.__proto__ = { foo: 'horse' };"
- "var foo = 'fish';"
- "function f() { return foo.length; }");
- // Initialize the IC for foo in f.
- CompileRun("for (var i = 0; i < 4; i++) f();");
- // Make sure the value of foo is correct before the deletion.
- CHECK_EQ(4, CompileRun("f()")->Int32Value());
- // Force the deletion of foo.
- CHECK(context->Global()->ForceDelete(v8_str("foo")));
- // Make sure the value for foo is read from the prototype, and that
- // we don't get in trouble with reading the deleted cell value
- // sentinel.
- CHECK_EQ(5, CompileRun("f()")->Int32Value());
-}
-
-
-TEST(InlinedFunctionAcrossContexts) {
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope outer_scope(isolate);
- v8::Local<v8::Context> ctx1 = v8::Context::New(isolate);
- v8::Local<v8::Context> ctx2 = v8::Context::New(isolate);
- ctx1->Enter();
-
- {
- v8::HandleScope inner_scope(CcTest::isolate());
- CompileRun("var G = 42; function foo() { return G; }");
- v8::Local<v8::Value> foo = ctx1->Global()->Get(v8_str("foo"));
- ctx2->Enter();
- ctx2->Global()->Set(v8_str("o"), foo);
- v8::Local<v8::Value> res = CompileRun(
- "function f() { return o(); }"
- "for (var i = 0; i < 10; ++i) f();"
- "%OptimizeFunctionOnNextCall(f);"
- "f();");
- CHECK_EQ(42, res->Int32Value());
- ctx2->Exit();
- v8::Handle<v8::String> G_property =
- v8::String::NewFromUtf8(CcTest::isolate(), "G");
- CHECK(ctx1->Global()->ForceDelete(G_property));
- ctx2->Enter();
- ExpectString(
- "(function() {"
- " try {"
- " return f();"
- " } catch(e) {"
- " return e.toString();"
- " }"
- " })()",
- "ReferenceError: G is not defined");
- ctx2->Exit();
- ctx1->Exit();
- }
-}
-
-
static v8::Local<Context> calling_context0;
static v8::Local<Context> calling_context1;
static v8::Local<Context> calling_context2;
@@ -17183,10 +14199,8 @@ static void ObjectWithExternalArrayTestHelper(
CHECK_EQ(0, result->Int32Value());
if (array_type == v8::kExternalFloat64Array ||
array_type == v8::kExternalFloat32Array) {
- CHECK_EQ(static_cast<int>(v8::base::OS::nan_value()),
- static_cast<int>(
- i::Object::GetElement(
- isolate, jsobj, 7).ToHandleChecked()->Number()));
+ CHECK(std::isnan(
+ i::Object::GetElement(isolate, jsobj, 7).ToHandleChecked()->Number()));
} else {
CheckElementValue(isolate, 0, jsobj, 7);
}
@@ -17783,8 +14797,8 @@ void ExtArrayLimitsHelper(v8::Isolate* isolate,
last_location = last_message = NULL;
obj->SetIndexedPropertiesToExternalArrayData(NULL, array_type, size);
CHECK(!obj->HasIndexedPropertiesInExternalArrayData());
- CHECK_NE(NULL, last_location);
- CHECK_NE(NULL, last_message);
+ CHECK(last_location);
+ CHECK(last_message);
}
@@ -17832,10 +14846,9 @@ void TypedArrayTestHelper(v8::ExternalArrayType array_type,
TypedArray::New(ab, 2*sizeof(ElementType), kElementCount);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
CHECK_EQ(kElementCount, static_cast<int>(ta->Length()));
- CHECK_EQ(2*sizeof(ElementType), static_cast<int>(ta->ByteOffset()));
- CHECK_EQ(kElementCount*sizeof(ElementType),
- static_cast<int>(ta->ByteLength()));
- CHECK_EQ(ab, ta->Buffer());
+ CHECK_EQ(2 * sizeof(ElementType), ta->ByteOffset());
+ CHECK_EQ(kElementCount * sizeof(ElementType), ta->ByteLength());
+ CHECK(ab->Equals(ta->Buffer()));
ElementType* data = backing_store.start() + 2;
for (int i = 0; i < kElementCount; i++) {
@@ -17918,9 +14931,9 @@ THREADED_TEST(DataView) {
Local<v8::DataView> dv =
v8::DataView::New(ab, 2, kSize);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
- CHECK_EQ(2, static_cast<int>(dv->ByteOffset()));
+ CHECK_EQ(2u, dv->ByteOffset());
CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
- CHECK_EQ(ab, dv->Buffer());
+ CHECK(ab->Equals(dv->Buffer()));
}
@@ -18344,29 +15357,39 @@ TEST(RethrowBogusErrorStackTrace) {
v8::PromiseRejectEvent reject_event = v8::kPromiseRejectWithNoHandler;
int promise_reject_counter = 0;
int promise_revoke_counter = 0;
+int promise_reject_msg_line_number = -1;
+int promise_reject_msg_column_number = -1;
int promise_reject_line_number = -1;
+int promise_reject_column_number = -1;
int promise_reject_frame_count = -1;
-void PromiseRejectCallback(v8::PromiseRejectMessage message) {
- if (message.GetEvent() == v8::kPromiseRejectWithNoHandler) {
+void PromiseRejectCallback(v8::PromiseRejectMessage reject_message) {
+ if (reject_message.GetEvent() == v8::kPromiseRejectWithNoHandler) {
promise_reject_counter++;
- CcTest::global()->Set(v8_str("rejected"), message.GetPromise());
- CcTest::global()->Set(v8_str("value"), message.GetValue());
- v8::Handle<v8::StackTrace> stack_trace =
- v8::Exception::CreateMessage(message.GetValue())->GetStackTrace();
+ CcTest::global()->Set(v8_str("rejected"), reject_message.GetPromise());
+ CcTest::global()->Set(v8_str("value"), reject_message.GetValue());
+ v8::Handle<v8::Message> message =
+ v8::Exception::CreateMessage(reject_message.GetValue());
+ v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+
+ promise_reject_msg_line_number = message->GetLineNumber();
+ promise_reject_msg_column_number = message->GetStartColumn() + 1;
+
if (!stack_trace.IsEmpty()) {
promise_reject_frame_count = stack_trace->GetFrameCount();
if (promise_reject_frame_count > 0) {
CHECK(stack_trace->GetFrame(0)->GetScriptName()->Equals(v8_str("pro")));
promise_reject_line_number = stack_trace->GetFrame(0)->GetLineNumber();
+ promise_reject_column_number = stack_trace->GetFrame(0)->GetColumn();
} else {
promise_reject_line_number = -1;
+ promise_reject_column_number = -1;
}
}
} else {
promise_revoke_counter++;
- CcTest::global()->Set(v8_str("revoked"), message.GetPromise());
- CHECK(message.GetValue().IsEmpty());
+ CcTest::global()->Set(v8_str("revoked"), reject_message.GetPromise());
+ CHECK(reject_message.GetValue().IsEmpty());
}
}
@@ -18384,7 +15407,10 @@ v8::Handle<v8::Value> RejectValue() {
void ResetPromiseStates() {
promise_reject_counter = 0;
promise_revoke_counter = 0;
+ promise_reject_msg_line_number = -1;
+ promise_reject_msg_column_number = -1;
promise_reject_line_number = -1;
+ promise_reject_column_number = -1;
promise_reject_frame_count = -1;
CcTest::global()->Set(v8_str("rejected"), v8_str(""));
CcTest::global()->Set(v8_str("value"), v8_str(""));
@@ -18613,6 +15639,9 @@ TEST(PromiseRejectCallback) {
CHECK_EQ(0, promise_revoke_counter);
CHECK_EQ(2, promise_reject_frame_count);
CHECK_EQ(3, promise_reject_line_number);
+ CHECK_EQ(5, promise_reject_column_number);
+ CHECK_EQ(3, promise_reject_msg_line_number);
+ CHECK_EQ(5, promise_reject_msg_column_number);
ResetPromiseStates();
@@ -18633,6 +15662,9 @@ TEST(PromiseRejectCallback) {
CHECK_EQ(0, promise_revoke_counter);
CHECK_EQ(2, promise_reject_frame_count);
CHECK_EQ(5, promise_reject_line_number);
+ CHECK_EQ(23, promise_reject_column_number);
+ CHECK_EQ(5, promise_reject_msg_line_number);
+ CHECK_EQ(23, promise_reject_msg_column_number);
// Throw in u3, which handles u1's rejection.
CompileRunWithOrigin(
@@ -18656,6 +15688,9 @@ TEST(PromiseRejectCallback) {
CHECK_EQ(2, promise_revoke_counter);
CHECK_EQ(3, promise_reject_frame_count);
CHECK_EQ(3, promise_reject_line_number);
+ CHECK_EQ(12, promise_reject_column_number);
+ CHECK_EQ(3, promise_reject_msg_line_number);
+ CHECK_EQ(12, promise_reject_msg_column_number);
ResetPromiseStates();
@@ -18675,6 +15710,28 @@ TEST(PromiseRejectCallback) {
CHECK_EQ(1, promise_revoke_counter);
CHECK_EQ(0, promise_reject_frame_count);
CHECK_EQ(-1, promise_reject_line_number);
+ CHECK_EQ(-1, promise_reject_column_number);
+
+ ResetPromiseStates();
+
+ // Create promise t1, which rejects by throwing syntax error from eval.
+ CompileRunWithOrigin(
+ "var t1 = new Promise( \n"
+ " function(res, rej) { \n"
+ " var content = '\\n\\\n"
+ " }'; \n"
+ " eval(content); \n"
+ " } \n"
+ "); \n",
+ "pro", 0, 0);
+ CHECK(!GetPromise("t1")->HasHandler());
+ CHECK_EQ(1, promise_reject_counter);
+ CHECK_EQ(0, promise_revoke_counter);
+ CHECK_EQ(2, promise_reject_frame_count);
+ CHECK_EQ(5, promise_reject_line_number);
+ CHECK_EQ(10, promise_reject_column_number);
+ CHECK_EQ(2, promise_reject_msg_line_number);
+ CHECK_EQ(7, promise_reject_msg_column_number);
}
@@ -18689,7 +15746,7 @@ void AnalyzeStackOfEvalWithSourceURL(
v8::Handle<v8::String> name =
stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
- CHECK_EQ(url, name);
+ CHECK(url->Equals(name));
}
}
@@ -18772,7 +15829,7 @@ void AnalyzeStackOfInlineScriptWithSourceURL(
v8::Handle<v8::String> name =
stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
- CHECK_EQ(url, name);
+ CHECK(url->Equals(name));
}
}
@@ -18818,7 +15875,7 @@ void AnalyzeStackOfDynamicScriptWithSourceURL(
v8::Handle<v8::String> name =
stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
- CHECK_EQ(url, name);
+ CHECK(url->Equals(name));
}
}
@@ -18896,7 +15953,7 @@ TEST(EvalWithSourceURLInMessageScriptResourceNameOrSourceURL) {
Local<v8::Message> message = try_catch.Message();
Handle<Value> sourceURL =
message->GetScriptOrigin().ResourceName();
- CHECK_EQ(*v8::String::Utf8Value(sourceURL), "source_url");
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(sourceURL), "source_url"));
}
@@ -18920,7 +15977,7 @@ TEST(RecursionWithSourceURLInMessageScriptResourceNameOrSourceURL) {
Local<v8::Message> message = try_catch.Message();
Handle<Value> sourceURL =
message->GetScriptOrigin().ResourceName();
- CHECK_EQ(*v8::String::Utf8Value(sourceURL), "source_url");
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(sourceURL), "source_url"));
}
@@ -19078,8 +16135,8 @@ THREADED_TEST(GetHeapStatistics) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
v8::HeapStatistics heap_statistics;
- CHECK_EQ(static_cast<int>(heap_statistics.total_heap_size()), 0);
- CHECK_EQ(static_cast<int>(heap_statistics.used_heap_size()), 0);
+ CHECK_EQ(0u, heap_statistics.total_heap_size());
+ CHECK_EQ(0u, heap_statistics.used_heap_size());
c1->GetIsolate()->GetHeapStatistics(&heap_statistics);
CHECK_NE(static_cast<int>(heap_statistics.total_heap_size()), 0);
CHECK_NE(static_cast<int>(heap_statistics.used_heap_size()), 0);
@@ -19300,7 +16357,7 @@ static uint64_t DoubleToBits(double value) {
static double DoubleToDateTime(double input) {
double date_limit = 864e13;
if (std::isnan(input) || input < -date_limit || input > date_limit) {
- return v8::base::OS::nan_value();
+ return std::numeric_limits<double>::quiet_NaN();
}
return (input < 0) ? -(std::floor(-input)) : std::floor(input);
}
@@ -19532,8 +16589,11 @@ TEST(Regress528) {
THREADED_TEST(ScriptOrigin) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
+ v8::ScriptOrigin origin = v8::ScriptOrigin(
+ v8::String::NewFromUtf8(env->GetIsolate(), "test"),
+ v8::Integer::New(env->GetIsolate(), 1),
+ v8::Integer::New(env->GetIsolate(), 1), v8::True(env->GetIsolate()),
+ v8::Handle<v8::Integer>(), v8::True(env->GetIsolate()));
v8::Handle<v8::String> script = v8::String::NewFromUtf8(
env->GetIsolate(), "function f() {}\n\nfunction g() {}");
v8::Script::Compile(script, &origin)->Run();
@@ -19543,12 +16603,18 @@ THREADED_TEST(ScriptOrigin) {
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
v8::ScriptOrigin script_origin_f = f->GetScriptOrigin();
- CHECK_EQ("test", *v8::String::Utf8Value(script_origin_f.ResourceName()));
- CHECK_EQ(0, script_origin_f.ResourceLineOffset()->Int32Value());
+ CHECK_EQ(0, strcmp("test",
+ *v8::String::Utf8Value(script_origin_f.ResourceName())));
+ CHECK_EQ(1, script_origin_f.ResourceLineOffset()->Int32Value());
+ CHECK(script_origin_f.ResourceIsSharedCrossOrigin()->Value());
+ CHECK(script_origin_f.ResourceIsEmbedderDebugScript()->Value());
v8::ScriptOrigin script_origin_g = g->GetScriptOrigin();
- CHECK_EQ("test", *v8::String::Utf8Value(script_origin_g.ResourceName()));
- CHECK_EQ(0, script_origin_g.ResourceLineOffset()->Int32Value());
+ CHECK_EQ(0, strcmp("test",
+ *v8::String::Utf8Value(script_origin_g.ResourceName())));
+ CHECK_EQ(1, script_origin_g.ResourceLineOffset()->Int32Value());
+ CHECK(script_origin_g.ResourceIsSharedCrossOrigin()->Value());
+ CHECK(script_origin_g.ResourceIsEmbedderDebugScript()->Value());
}
@@ -19563,7 +16629,8 @@ THREADED_TEST(FunctionGetInferredName) {
v8::Script::Compile(script, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- CHECK_EQ("foo.bar.baz", *v8::String::Utf8Value(f->GetInferredName()));
+ CHECK_EQ(0,
+ strcmp("foo.bar.baz", *v8::String::Utf8Value(f->GetInferredName())));
}
@@ -19624,13 +16691,14 @@ THREADED_TEST(FunctionGetDisplayName) {
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
CHECK_EQ(false, error->BooleanValue());
- CHECK_EQ("display_a", *v8::String::Utf8Value(a->GetDisplayName()));
- CHECK_EQ("display_b", *v8::String::Utf8Value(b->GetDisplayName()));
+ CHECK_EQ(0, strcmp("display_a", *v8::String::Utf8Value(a->GetDisplayName())));
+ CHECK_EQ(0, strcmp("display_b", *v8::String::Utf8Value(b->GetDisplayName())));
CHECK(c->GetDisplayName()->IsUndefined());
CHECK(d->GetDisplayName()->IsUndefined());
CHECK(e->GetDisplayName()->IsUndefined());
CHECK(f->GetDisplayName()->IsUndefined());
- CHECK_EQ("set_in_runtime", *v8::String::Utf8Value(g->GetDisplayName()));
+ CHECK_EQ(
+ 0, strcmp("set_in_runtime", *v8::String::Utf8Value(g->GetDisplayName())));
}
@@ -19730,7 +16798,7 @@ THREADED_TEST(FunctionGetBoundFunction) {
CHECK(g->GetBoundFunction()->IsFunction());
Local<v8::Function> original_function = Local<v8::Function>::Cast(
g->GetBoundFunction());
- CHECK_EQ(f->GetName(), original_function->GetName());
+ CHECK(f->GetName()->Equals(original_function->GetName()));
CHECK_EQ(f->GetScriptLineNumber(), original_function->GetScriptLineNumber());
CHECK_EQ(f->GetScriptColumnNumber(),
original_function->GetScriptColumnNumber());
@@ -20465,8 +17533,8 @@ TEST(IsolateNewDispose) {
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
last_location = last_message = NULL;
isolate->Dispose();
- CHECK_EQ(last_location, NULL);
- CHECK_EQ(last_message, NULL);
+ CHECK(!last_location);
+ CHECK(!last_message);
}
@@ -20482,8 +17550,8 @@ UNINITIALIZED_TEST(DisposeIsolateWhenInUse) {
last_location = last_message = NULL;
// Still entered, should fail.
isolate->Dispose();
- CHECK_NE(last_location, NULL);
- CHECK_NE(last_message, NULL);
+ CHECK(last_location);
+ CHECK(last_message);
}
isolate->Dispose();
}
@@ -20596,12 +17664,12 @@ TEST(RunTwoIsolatesOnSingleThread) {
last_location = last_message = NULL;
isolate1->Dispose();
- CHECK_EQ(last_location, NULL);
- CHECK_EQ(last_message, NULL);
+ CHECK(!last_location);
+ CHECK(!last_message);
isolate2->Dispose();
- CHECK_EQ(last_location, NULL);
- CHECK_EQ(last_message, NULL);
+ CHECK(!last_location);
+ CHECK(!last_message);
// Check that default isolate still runs.
{
@@ -20899,62 +17967,6 @@ TEST(DontDeleteCellLoadIC) {
}
-TEST(DontDeleteCellLoadICForceDelete) {
- const char* function_code =
- "function readCell() { while (true) { return cell; } }";
-
- // Run the code twice to initialize the load IC for a don't delete
- // cell.
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- CompileRun("var cell = \"value\";");
- ExpectBoolean("delete cell", false);
- CompileRun(function_code);
- ExpectString("readCell()", "value");
- ExpectString("readCell()", "value");
-
- // Delete the cell using the API and check the inlined code works
- // correctly.
- CHECK(context->Global()->ForceDelete(v8_str("cell")));
- ExpectString("(function() {"
- " try {"
- " return readCell();"
- " } catch(e) {"
- " return e.toString();"
- " }"
- "})()",
- "ReferenceError: cell is not defined");
-}
-
-
-TEST(DontDeleteCellLoadICAPI) {
- const char* function_code =
- "function readCell() { while (true) { return cell; } }";
-
- // Run the code twice to initialize the load IC for a don't delete
- // cell created using the API.
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- context->Global()->ForceSet(v8_str("cell"), v8_str("value"), v8::DontDelete);
- ExpectBoolean("delete cell", false);
- CompileRun(function_code);
- ExpectString("readCell()", "value");
- ExpectString("readCell()", "value");
-
- // Delete the cell using the API and check the inlined code works
- // correctly.
- CHECK(context->Global()->ForceDelete(v8_str("cell")));
- ExpectString("(function() {"
- " try {"
- " return readCell();"
- " } catch(e) {"
- " return e.toString();"
- " }"
- "})()",
- "ReferenceError: cell is not defined");
-}
-
-
class Visitor42 : public v8::PersistentHandleVisitor {
public:
explicit Visitor42(v8::Persistent<v8::Object>* object)
@@ -20970,7 +17982,7 @@ class Visitor42 : public v8::PersistentHandleVisitor {
v8::Handle<v8::Value> object =
v8::Local<v8::Object>::New(isolate, *object_);
CHECK(handle->IsObject());
- CHECK_EQ(Handle<Object>::Cast(handle), object);
+ CHECK(Handle<Object>::Cast(handle)->Equals(object));
++counter_;
}
@@ -21148,8 +18160,8 @@ TEST(NamedEnumeratorAndForIn) {
context->Global()->Set(v8_str("o"), tmpl->NewInstance());
v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
"var result = []; for (var k in o) result.push(k); result"));
- CHECK_EQ(1, result->Length());
- CHECK_EQ(v8_str("universalAnswer"), result->Get(0));
+ CHECK_EQ(1u, result->Length());
+ CHECK(v8_str("universalAnswer")->Equals(result->Get(0)));
}
@@ -21535,15 +18547,15 @@ THREADED_TEST(ReadOnlyIndexedProperties) {
context->Global()->Set(v8_str("obj"), obj);
obj->ForceSet(v8_str("1"), v8_str("DONT_CHANGE"), v8::ReadOnly);
obj->Set(v8_str("1"), v8_str("foobar"));
- CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_str("1")));
+ CHECK(v8_str("DONT_CHANGE")->Equals(obj->Get(v8_str("1"))));
obj->ForceSet(v8_num(2), v8_str("DONT_CHANGE"), v8::ReadOnly);
obj->Set(v8_num(2), v8_str("foobar"));
- CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_num(2)));
+ CHECK(v8_str("DONT_CHANGE")->Equals(obj->Get(v8_num(2))));
// Test non-smi case.
obj->ForceSet(v8_str("2000000000"), v8_str("DONT_CHANGE"), v8::ReadOnly);
obj->Set(v8_str("2000000000"), v8_str("foobar"));
- CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_str("2000000000")));
+ CHECK(v8_str("DONT_CHANGE")->Equals(obj->Get(v8_str("2000000000"))));
}
@@ -21691,30 +18703,6 @@ THREADED_TEST(Regress93759) {
}
-THREADED_TEST(Regress125988) {
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> intercept = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(intercept, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Intercept"), intercept->GetFunction());
- CompileRun("var a = new Object();"
- "var b = new Intercept();"
- "var c = new Object();"
- "c.__proto__ = b;"
- "b.__proto__ = a;"
- "a.x = 23;"
- "for (var i = 0; i < 3; i++) c.x;");
- ExpectBoolean("c.hasOwnProperty('x')", false);
- ExpectInt32("c.x", 23);
- CompileRun("a.y = 42;"
- "for (var i = 0; i < 3; i++) c.x;");
- ExpectBoolean("c.hasOwnProperty('x')", false);
- ExpectInt32("c.x", 23);
- ExpectBoolean("c.hasOwnProperty('y')", false);
- ExpectInt32("c.y", 42);
-}
-
-
static void TestReceiver(Local<Value> expected_result,
Local<Value> expected_receiver,
const char* code) {
@@ -22000,7 +18988,7 @@ TEST(EnqueueMicrotask) {
g_passed_to_three = NULL;
env->GetIsolate()->EnqueueMicrotask(MicrotaskThree);
CompileRun("1+1;");
- CHECK_EQ(NULL, g_passed_to_three);
+ CHECK(!g_passed_to_three);
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
@@ -22291,8 +19279,8 @@ UNINITIALIZED_TEST(IsolateEmbedderData) {
isolate->Enter();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
- CHECK_EQ(NULL, isolate->GetData(slot));
- CHECK_EQ(NULL, i_isolate->GetData(slot));
+ CHECK(!isolate->GetData(slot));
+ CHECK(!i_isolate->GetData(slot));
}
for (uint32_t slot = 0; slot < v8::Isolate::GetNumberOfDataSlots(); ++slot) {
void* data = reinterpret_cast<void*>(0xacce55ed + slot);
@@ -22331,7 +19319,7 @@ static int instance_checked_getter_count = 0;
static void InstanceCheckedGetter(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK_EQ(name, v8_str("foo"));
+ CHECK(name->Equals(v8_str("foo")));
instance_checked_getter_count++;
info.GetReturnValue().Set(v8_num(11));
}
@@ -22341,8 +19329,8 @@ static int instance_checked_setter_count = 0;
static void InstanceCheckedSetter(Local<String> name,
Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- CHECK_EQ(name, v8_str("foo"));
- CHECK_EQ(value, v8_num(23));
+ CHECK(name->Equals(v8_str("foo")));
+ CHECK(value->Equals(v8_num(23)));
instance_checked_setter_count++;
}
@@ -22434,6 +19422,15 @@ THREADED_TEST(InstanceCheckOnInstanceAccessor) {
}
+static void EmptyInterceptorGetter(
+ Local<String> name, const v8::PropertyCallbackInfo<v8::Value>& info) {}
+
+
+static void EmptyInterceptorSetter(
+ Local<String> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {}
+
+
THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
v8::internal::FLAG_allow_natives_syntax = true;
LocalContext context;
@@ -22441,7 +19438,8 @@ THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> inst = templ->InstanceTemplate();
- AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ templ->InstanceTemplate()->SetNamedPropertyHandler(EmptyInterceptorGetter,
+ EmptyInterceptorSetter);
inst->SetAccessor(v8_str("foo"),
InstanceCheckedGetter, InstanceCheckedSetter,
Handle<Value>(),
@@ -22716,16 +19714,6 @@ THREADED_TEST(Regress137496) {
}
-THREADED_TEST(Regress149912) {
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
- AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
- context->Global()->Set(v8_str("Bug"), templ->GetFunction());
- CompileRun("Number.prototype.__proto__ = new Bug; var x = 0; x.foo();");
-}
-
-
THREADED_TEST(Regress157124) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -23043,111 +20031,6 @@ TEST(AccessCheckThrows) {
}
-THREADED_TEST(Regress256330) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
- AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
- context->Global()->Set(v8_str("Bug"), templ->GetFunction());
- CompileRun("\"use strict\"; var o = new Bug;"
- "function f(o) { o.x = 10; };"
- "f(o); f(o); f(o);"
- "%OptimizeFunctionOnNextCall(f);"
- "f(o);");
- ExpectBoolean("%GetOptimizationStatus(f) != 2", true);
-}
-
-
-THREADED_TEST(CrankshaftInterceptorSetter) {
- i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- // Initialize fields to avoid transitions later.
- "obj.age = 0;"
- "obj.accessor_age = 42;"
- "function setter(i) { this.accessor_age = i; };"
- "function getter() { return this.accessor_age; };"
- "function setAge(i) { obj.age = i; };"
- "Object.defineProperty(obj, 'age', { get:getter, set:setter });"
- "setAge(1);"
- "setAge(2);"
- "setAge(3);"
- "%OptimizeFunctionOnNextCall(setAge);"
- "setAge(4);");
- // All stores went through the interceptor.
- ExpectInt32("obj.interceptor_age", 4);
- ExpectInt32("obj.accessor_age", 42);
-}
-
-
-THREADED_TEST(CrankshaftInterceptorGetter) {
- i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- // Initialize fields to avoid transitions later.
- "obj.age = 1;"
- "obj.accessor_age = 42;"
- "function getter() { return this.accessor_age; };"
- "function getAge() { return obj.interceptor_age; };"
- "Object.defineProperty(obj, 'interceptor_age', { get:getter });"
- "getAge();"
- "getAge();"
- "getAge();"
- "%OptimizeFunctionOnNextCall(getAge);");
- // Access through interceptor.
- ExpectInt32("getAge()", 1);
-}
-
-
-THREADED_TEST(CrankshaftInterceptorFieldRead) {
- i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- "obj.__proto__.interceptor_age = 42;"
- "obj.age = 100;"
- "function getAge() { return obj.interceptor_age; };");
- ExpectInt32("getAge();", 100);
- ExpectInt32("getAge();", 100);
- ExpectInt32("getAge();", 100);
- CompileRun("%OptimizeFunctionOnNextCall(getAge);");
- // Access through interceptor.
- ExpectInt32("getAge();", 100);
-}
-
-
-THREADED_TEST(CrankshaftInterceptorFieldWrite) {
- i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
- AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
- CompileRun("var obj = new Obj;"
- "obj.age = 100000;"
- "function setAge(i) { obj.age = i };"
- "setAge(100);"
- "setAge(101);"
- "setAge(102);"
- "%OptimizeFunctionOnNextCall(setAge);"
- "setAge(103);");
- ExpectInt32("obj.age", 100000);
- ExpectInt32("obj.interceptor_age", 103);
-}
-
-
class RequestInterruptTestBase {
public:
RequestInterruptTestBase()
@@ -23469,7 +20352,7 @@ TEST(RequestMultipleInterrupts) { RequestMultipleInterrupts().RunTest(); }
static Local<Value> function_new_expected_env;
static void FunctionNewCallback(const v8::FunctionCallbackInfo<Value>& info) {
- CHECK_EQ(function_new_expected_env, info.Data());
+ CHECK(function_new_expected_env->Equals(info.Data()));
info.GetReturnValue().Set(17);
}
@@ -23483,25 +20366,25 @@ THREADED_TEST(FunctionNew) {
Local<Function> func = Function::New(isolate, FunctionNewCallback, data);
env->Global()->Set(v8_str("func"), func);
Local<Value> result = CompileRun("func();");
- CHECK_EQ(v8::Integer::New(isolate, 17), result);
+ CHECK(v8::Integer::New(isolate, 17)->Equals(result));
// Verify function not cached
int serial_number =
i::Smi::cast(v8::Utils::OpenHandle(*func)
->shared()->get_api_func_data()->serial_number())->value();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::JSObject> cache(i_isolate->native_context()->function_cache());
- i::Handle<i::Object> elm =
- i::Object::GetElement(i_isolate, cache, serial_number).ToHandleChecked();
- CHECK(elm->IsUndefined());
+ i::Handle<i::FixedArray> cache(i_isolate->native_context()->function_cache());
+ if (serial_number < cache->length()) {
+ CHECK(cache->get(serial_number)->IsUndefined());
+ }
// Verify that each Function::New creates a new function instance
Local<Object> data2 = v8::Object::New(isolate);
function_new_expected_env = data2;
Local<Function> func2 = Function::New(isolate, FunctionNewCallback, data2);
CHECK(!func2->IsNull());
- CHECK_NE(func, func2);
+ CHECK(!func->Equals(func2));
env->Global()->Set(v8_str("func2"), func2);
Local<Value> result2 = CompileRun("func2();");
- CHECK_EQ(v8::Integer::New(isolate, 17), result2);
+ CHECK(v8::Integer::New(isolate, 17)->Equals(result2));
}
@@ -23519,7 +20402,7 @@ TEST(EscapeableHandleScope) {
for (int i = 0; i < runs; i++) {
Local<String> expected;
if (i != 0) {
- CHECK_EQ(v8_str("escape value"), values[i]);
+ CHECK(v8_str("escape value")->Equals(values[i]));
} else {
CHECK(values[i].IsEmpty());
}
@@ -23565,7 +20448,7 @@ class ApiCallOptimizationChecker {
CHECK(data == info.Data());
CHECK(receiver == info.This());
if (info.Length() == 1) {
- CHECK_EQ(v8_num(1), info[0]);
+ CHECK(v8_num(1)->Equals(info[0]));
}
CHECK(holder == info.Holder());
count++;
@@ -23728,13 +20611,203 @@ Local<Object> ApiCallOptimizationChecker::callee;
int ApiCallOptimizationChecker::count = 0;
-TEST(TestFunctionCallOptimization) {
+TEST(FunctionCallOptimization) {
i::FLAG_allow_natives_syntax = true;
ApiCallOptimizationChecker checker;
checker.RunAll();
}
+TEST(FunctionCallOptimizationMultipleArgs) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Handle<Object> global = context->Global();
+ Local<v8::Function> function = Function::New(isolate, Returns42);
+ global->Set(v8_str("x"), function);
+ CompileRun(
+ "function x_wrap() {\n"
+ " for (var i = 0; i < 5; i++) {\n"
+ " x(1,2,3);\n"
+ " }\n"
+ "}\n"
+ "x_wrap();\n"
+ "%OptimizeFunctionOnNextCall(x_wrap);"
+ "x_wrap();\n");
+}
+
+
+static void ReturnsSymbolCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8::Symbol::New(info.GetIsolate()));
+}
+
+
+TEST(ApiCallbackCanReturnSymbols) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Handle<Object> global = context->Global();
+ Local<v8::Function> function = Function::New(isolate, ReturnsSymbolCallback);
+ global->Set(v8_str("x"), function);
+ CompileRun(
+ "function x_wrap() {\n"
+ " for (var i = 0; i < 5; i++) {\n"
+ " x();\n"
+ " }\n"
+ "}\n"
+ "x_wrap();\n"
+ "%OptimizeFunctionOnNextCall(x_wrap);"
+ "x_wrap();\n");
+}
+
+
+TEST(EmptyApiCallback) {
+ LocalContext context;
+ auto isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ auto global = context->Global();
+ auto function = FunctionTemplate::New(isolate)->GetFunction();
+ global->Set(v8_str("x"), function);
+
+ auto result = CompileRun("x()");
+ CHECK(v8::Utils::OpenHandle(*result)->IsJSGlobalProxy());
+
+ result = CompileRun("x(1,2,3)");
+ CHECK(v8::Utils::OpenHandle(*result)->IsJSGlobalProxy());
+
+ result = CompileRun("7 + x.call(3) + 11");
+ CHECK(result->IsInt32());
+ CHECK_EQ(21, result->Int32Value());
+
+ result = CompileRun("7 + x.call(3, 101, 102, 103, 104) + 11");
+ CHECK(result->IsInt32());
+ CHECK_EQ(21, result->Int32Value());
+
+ result = CompileRun("var y = []; x.call(y)");
+ CHECK(result->IsArray());
+
+ result = CompileRun("x.call(y, 1, 2, 3, 4)");
+ CHECK(result->IsArray());
+}
+
+
+TEST(SimpleSignatureCheck) {
+ LocalContext context;
+ auto isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ auto global = context->Global();
+ auto sig_obj = FunctionTemplate::New(isolate);
+ auto sig = v8::Signature::New(isolate, sig_obj);
+ auto x = FunctionTemplate::New(isolate, Returns42, Handle<Value>(), sig);
+ global->Set(v8_str("sig_obj"), sig_obj->GetFunction());
+ global->Set(v8_str("x"), x->GetFunction());
+ CompileRun("var s = new sig_obj();");
+ {
+ TryCatch try_catch(isolate);
+ CompileRun("x()");
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ TryCatch try_catch(isolate);
+ CompileRun("x.call(1)");
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ TryCatch try_catch(isolate);
+ auto result = CompileRun("s.x = x; s.x()");
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(42, result->Int32Value());
+ }
+ {
+ TryCatch try_catch(isolate);
+ auto result = CompileRun("x.call(s)");
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(42, result->Int32Value());
+ }
+}
+
+
+TEST(ChainSignatureCheck) {
+ LocalContext context;
+ auto isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ auto global = context->Global();
+ auto sig_obj = FunctionTemplate::New(isolate);
+ auto sig = v8::Signature::New(isolate, sig_obj);
+ for (int i = 0; i < 4; ++i) {
+ auto temp = FunctionTemplate::New(isolate);
+ temp->Inherit(sig_obj);
+ sig_obj = temp;
+ }
+ auto x = FunctionTemplate::New(isolate, Returns42, Handle<Value>(), sig);
+ global->Set(v8_str("sig_obj"), sig_obj->GetFunction());
+ global->Set(v8_str("x"), x->GetFunction());
+ CompileRun("var s = new sig_obj();");
+ {
+ TryCatch try_catch(isolate);
+ CompileRun("x()");
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ TryCatch try_catch(isolate);
+ CompileRun("x.call(1)");
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ TryCatch try_catch(isolate);
+ auto result = CompileRun("s.x = x; s.x()");
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(42, result->Int32Value());
+ }
+ {
+ TryCatch try_catch(isolate);
+ auto result = CompileRun("x.call(s)");
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(42, result->Int32Value());
+ }
+}
+
+
+TEST(PrototypeSignatureCheck) {
+ LocalContext context;
+ auto isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ auto global = context->Global();
+ auto sig_obj = FunctionTemplate::New(isolate);
+ sig_obj->SetHiddenPrototype(true);
+ auto sig = v8::Signature::New(isolate, sig_obj);
+ auto x = FunctionTemplate::New(isolate, Returns42, Handle<Value>(), sig);
+ global->Set(v8_str("sig_obj"), sig_obj->GetFunction());
+ global->Set(v8_str("x"), x->GetFunction());
+ CompileRun("s = {}; s.__proto__ = new sig_obj();");
+ {
+ TryCatch try_catch(isolate);
+ CompileRun("x()");
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ TryCatch try_catch(isolate);
+ CompileRun("x.call(1)");
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ TryCatch try_catch(isolate);
+ auto result = CompileRun("s.x = x; s.x()");
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(42, result->Int32Value());
+ }
+ {
+ TryCatch try_catch(isolate);
+ auto result = CompileRun("x.call(s)");
+ CHECK(!try_catch.HasCaught());
+ CHECK_EQ(42, result->Int32Value());
+ }
+}
+
+
static const char* last_event_message;
static int last_event_status;
void StoringEventLoggerCallback(const char* message, int status) {
@@ -23747,13 +20820,13 @@ TEST(EventLogging) {
v8::Isolate* isolate = CcTest::isolate();
isolate->SetEventLogger(StoringEventLoggerCallback);
v8::internal::HistogramTimer histogramTimer(
- "V8.Test", 0, 10000, 50,
+ "V8.Test", 0, 10000, v8::internal::HistogramTimer::MILLISECOND, 50,
reinterpret_cast<v8::internal::Isolate*>(isolate));
histogramTimer.Start();
- CHECK_EQ("V8.Test", last_event_message);
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
CHECK_EQ(0, last_event_status);
histogramTimer.Stop();
- CHECK_EQ("V8.Test", last_event_message);
+ CHECK_EQ(0, strcmp("V8.Test", last_event_message));
CHECK_EQ(1, last_event_status);
}
@@ -23886,7 +20959,7 @@ TEST(PromiseThen) {
CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
isolate->RunMicrotasks();
CHECK(!global->Get(v8_str("x1"))->IsNumber());
- CHECK_EQ(p, global->Get(v8_str("x1")));
+ CHECK(p->Equals(global->Get(v8_str("x1"))));
// Then
CompileRun("x1 = x2 = 0;");
@@ -24026,7 +21099,7 @@ TEST(ScriptNameAndLineNumber) {
CHECK(!script_name.IsEmpty());
CHECK(script_name->IsString());
String::Utf8Value utf8_name(script_name);
- CHECK_EQ(url, *utf8_name);
+ CHECK_EQ(0, strcmp(url, *utf8_name));
int line_number = script->GetUnboundScript()->GetLineNumber(0);
CHECK_EQ(13, line_number);
}
@@ -24035,14 +21108,14 @@ void CheckMagicComments(Handle<Script> script, const char* expected_source_url,
const char* expected_source_mapping_url) {
if (expected_source_url != NULL) {
v8::String::Utf8Value url(script->GetUnboundScript()->GetSourceURL());
- CHECK_EQ(expected_source_url, *url);
+ CHECK_EQ(0, strcmp(expected_source_url, *url));
} else {
CHECK(script->GetUnboundScript()->GetSourceURL()->IsUndefined());
}
if (expected_source_mapping_url != NULL) {
v8::String::Utf8Value url(
script->GetUnboundScript()->GetSourceMappingURL());
- CHECK_EQ(expected_source_mapping_url, *url);
+ CHECK_EQ(0, strcmp(expected_source_mapping_url, *url));
} else {
CHECK(script->GetUnboundScript()->GetSourceMappingURL()->IsUndefined());
}
@@ -24133,16 +21206,16 @@ TEST(GetOwnPropertyDescriptor) {
Local<Value> desc = x->GetOwnPropertyDescriptor(v8_str("no_prop"));
CHECK(desc->IsUndefined());
desc = x->GetOwnPropertyDescriptor(v8_str("p0"));
- CHECK_EQ(v8_num(12), Local<Object>::Cast(desc)->Get(v8_str("value")));
+ CHECK(v8_num(12)->Equals(Local<Object>::Cast(desc)->Get(v8_str("value"))));
desc = x->GetOwnPropertyDescriptor(v8_str("p1"));
Local<Function> set =
Local<Function>::Cast(Local<Object>::Cast(desc)->Get(v8_str("set")));
Local<Function> get =
Local<Function>::Cast(Local<Object>::Cast(desc)->Get(v8_str("get")));
- CHECK_EQ(v8_num(13), get->Call(x, 0, NULL));
+ CHECK(v8_num(13)->Equals(get->Call(x, 0, NULL)));
Handle<Value> args[] = { v8_num(14) };
set->Call(x, 1, args);
- CHECK_EQ(v8_num(14), get->Call(x, 0, NULL));
+ CHECK(v8_num(14)->Equals(get->Call(x, 0, NULL)));
}
@@ -24263,12 +21336,11 @@ void RunStreamingTest(const char** chunks,
task->Run();
delete task;
- v8::ScriptOrigin origin(v8_str("http://foo.com"));
- char* full_source = TestSourceStream::FullSourceString(chunks);
-
- // The possible errors are only produced while compiling.
+ // Possible errors are only produced while compiling.
CHECK_EQ(false, try_catch.HasCaught());
+ v8::ScriptOrigin origin(v8_str("http://foo.com"));
+ char* full_source = TestSourceStream::FullSourceString(chunks);
v8::Handle<Script> script = v8::ScriptCompiler::Compile(
isolate, &source, v8_str(full_source), origin);
if (expected_success) {
@@ -24509,6 +21581,45 @@ TEST(StreamingProducesParserCache) {
}
+TEST(StreamingWithDebuggingDoesNotProduceParserCache) {
+ // If the debugger is active, we should just not produce parser cache at
+ // all. This is a regeression test: We used to produce a parser cache without
+ // any data in it (just headers).
+ i::FLAG_min_preparse_length = 0;
+ const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
+ NULL};
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Make the debugger active by setting a breakpoint.
+ CompileRun("function break_here() { }");
+ i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*env->Global()->Get(v8_str("break_here"))));
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
+ int position = 0;
+ debug->SetBreakPoint(func, i::Handle<i::Object>(v8::internal::Smi::FromInt(1),
+ CcTest::i_isolate()),
+ &position);
+
+ v8::ScriptCompiler::StreamedSource source(
+ new TestSourceStream(chunks),
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE);
+ v8::ScriptCompiler::ScriptStreamingTask* task =
+ v8::ScriptCompiler::StartStreamingScript(
+ isolate, &source, v8::ScriptCompiler::kProduceParserCache);
+
+ // TestSourceStream::GetMoreData won't block, so it's OK to just run the
+ // task here in the main thread.
+ task->Run();
+ delete task;
+
+ // Check that we got no cached data.
+ CHECK(source.GetCachedData() == NULL);
+}
+
+
TEST(StreamingScriptWithInvalidUtf8) {
// Regression test for a crash: test that invalid UTF-8 bytes in the end of a
// chunk don't produce a crash.
@@ -24571,6 +21682,48 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit2) {
}
+TEST(StreamingWithHarmonyScopes) {
+ // Don't use RunStreamingTest here so that both scripts get to use the same
+ // LocalContext and HandleScope.
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // First, run a script with a let variable.
+ CompileRun("\"use strict\"; let x = 1;");
+
+ // Then stream a script which (erroneously) tries to introduce the same
+ // variable again.
+ const char* chunks[] = {"\"use strict\"; let x = 2;", NULL};
+
+ v8::TryCatch try_catch;
+ v8::ScriptCompiler::StreamedSource source(
+ new TestSourceStream(chunks),
+ v8::ScriptCompiler::StreamedSource::ONE_BYTE);
+ v8::ScriptCompiler::ScriptStreamingTask* task =
+ v8::ScriptCompiler::StartStreamingScript(isolate, &source);
+ task->Run();
+ delete task;
+
+ // Parsing should succeed (the script will be parsed and compiled in a context
+ // independent way, so the error is not detected).
+ CHECK_EQ(false, try_catch.HasCaught());
+
+ v8::ScriptOrigin origin(v8_str("http://foo.com"));
+ char* full_source = TestSourceStream::FullSourceString(chunks);
+ v8::Handle<Script> script = v8::ScriptCompiler::Compile(
+ isolate, &source, v8_str(full_source), origin);
+ CHECK(!script.IsEmpty());
+ CHECK_EQ(false, try_catch.HasCaught());
+
+ // Running the script exposes the error.
+ v8::Handle<Value> result(script->Run());
+ CHECK(result.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ delete[] full_source;
+}
+
+
void TestInvalidCacheData(v8::ScriptCompiler::CompileOptions option) {
const char* garbage = "garbage garbage garbage garbage garbage garbage";
const uint8_t* data = reinterpret_cast<const uint8_t*>(garbage);
@@ -24791,3 +21944,37 @@ TEST(StreamingScriptWithSourceMappingURLInTheMiddle) {
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true, NULL,
"bar2.js");
}
+
+
+TEST(NewStringRangeError) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext env;
+ const int length = i::String::kMaxLength + 1;
+ const int buffer_size = length * sizeof(uint16_t);
+ void* buffer = malloc(buffer_size);
+ if (buffer == NULL) return;
+ memset(buffer, 'A', buffer_size);
+ {
+ v8::TryCatch try_catch;
+ char* data = reinterpret_cast<char*>(buffer);
+ CHECK(v8::String::NewFromUtf8(isolate, data, v8::String::kNormalString,
+ length).IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ v8::TryCatch try_catch;
+ uint8_t* data = reinterpret_cast<uint8_t*>(buffer);
+ CHECK(v8::String::NewFromOneByte(isolate, data, v8::String::kNormalString,
+ length).IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+ {
+ v8::TryCatch try_catch;
+ uint16_t* data = reinterpret_cast<uint16_t*>(buffer);
+ CHECK(v8::String::NewFromTwoByte(isolate, data, v8::String::kNormalString,
+ length).IsEmpty());
+ CHECK(try_catch.HasCaught());
+ }
+ free(buffer);
+}
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
new file mode 100644
index 0000000000..17e0711af5
--- /dev/null
+++ b/deps/v8/test/cctest/test-api.h
@@ -0,0 +1,34 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/cpu-profiler.h"
+#include "src/isolate.h"
+#include "src/vm-state.h"
+#include "test/cctest/cctest.h"
+
+template <typename T>
+static void CheckReturnValue(const T& t, i::Address callback) {
+ v8::ReturnValue<v8::Value> rv = t.GetReturnValue();
+ i::Object** o = *reinterpret_cast<i::Object***>(&rv);
+ CHECK_EQ(CcTest::isolate(), t.GetIsolate());
+ CHECK_EQ(t.GetIsolate(), rv.GetIsolate());
+ CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
+ // Verify reset
+ bool is_runtime = (*o)->IsTheHole();
+ rv.Set(true);
+ CHECK(!(*o)->IsTheHole() && !(*o)->IsUndefined());
+ rv.Set(v8::Handle<v8::Object>());
+ CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
+ CHECK_EQ(is_runtime, (*o)->IsTheHole());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(t.GetIsolate());
+ // If CPU profiler is active check that when API callback is invoked
+ // VMState is set to EXTERNAL.
+ if (isolate->cpu_profiler()->is_profiling()) {
+ CHECK_EQ(v8::EXTERNAL, isolate->current_vm_state());
+ CHECK(isolate->external_callback_scope());
+ CHECK_EQ(callback, isolate->external_callback_scope()->callback());
+ }
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 2bcf022482..526d4567fa 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -987,10 +987,10 @@ TEST(11) {
Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0xabcd0001, i.a);
+ CHECK_EQ(static_cast<int32_t>(0xabcd0001), i.a);
CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
CHECK_EQ(0x00000000, i.c);
- CHECK_EQ(0xffffffff, i.d);
+ CHECK_EQ(static_cast<int32_t>(0xffffffff), i.d);
}
@@ -1129,8 +1129,8 @@ TEST(13) {
CHECK_EQ(14.7610017472335499, t.i);
CHECK_EQ(16.0, t.j);
CHECK_EQ(73.8818412254460241, t.k);
- CHECK_EQ(372106121, t.low);
- CHECK_EQ(1079146608, t.high);
+ CHECK_EQ(372106121u, t.low);
+ CHECK_EQ(1079146608u, t.high);
}
}
@@ -1321,22 +1321,22 @@ TEST(15) {
t.dstA7 = 0;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x01020304, t.dst0);
- CHECK_EQ(0x11121314, t.dst1);
- CHECK_EQ(0x21222324, t.dst2);
- CHECK_EQ(0x31323334, t.dst3);
- CHECK_EQ(0x41424344, t.dst4);
- CHECK_EQ(0x51525354, t.dst5);
- CHECK_EQ(0x61626364, t.dst6);
- CHECK_EQ(0x71727374, t.dst7);
- CHECK_EQ(0x00430044, t.dstA0);
- CHECK_EQ(0x00410042, t.dstA1);
- CHECK_EQ(0x00830084, t.dstA2);
- CHECK_EQ(0x00810082, t.dstA3);
- CHECK_EQ(0x00430044, t.dstA4);
- CHECK_EQ(0x00410042, t.dstA5);
- CHECK_EQ(0x00830084, t.dstA6);
- CHECK_EQ(0x00810082, t.dstA7);
+ CHECK_EQ(0x01020304u, t.dst0);
+ CHECK_EQ(0x11121314u, t.dst1);
+ CHECK_EQ(0x21222324u, t.dst2);
+ CHECK_EQ(0x31323334u, t.dst3);
+ CHECK_EQ(0x41424344u, t.dst4);
+ CHECK_EQ(0x51525354u, t.dst5);
+ CHECK_EQ(0x61626364u, t.dst6);
+ CHECK_EQ(0x71727374u, t.dst7);
+ CHECK_EQ(0x00430044u, t.dstA0);
+ CHECK_EQ(0x00410042u, t.dstA1);
+ CHECK_EQ(0x00830084u, t.dstA2);
+ CHECK_EQ(0x00810082u, t.dstA3);
+ CHECK_EQ(0x00430044u, t.dstA4);
+ CHECK_EQ(0x00410042u, t.dstA5);
+ CHECK_EQ(0x00830084u, t.dstA6);
+ CHECK_EQ(0x00810082u, t.dstA7);
}
}
@@ -1406,11 +1406,11 @@ TEST(16) {
t.dst4 = 0;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x12130304, t.dst0);
- CHECK_EQ(0x01021213, t.dst1);
- CHECK_EQ(0x00010003, t.dst2);
- CHECK_EQ(0x00000003, t.dst3);
- CHECK_EQ(0x11121313, t.dst4);
+ CHECK_EQ(0x12130304u, t.dst0);
+ CHECK_EQ(0x01021213u, t.dst1);
+ CHECK_EQ(0x00010003u, t.dst2);
+ CHECK_EQ(0x00000003u, t.dst3);
+ CHECK_EQ(0x11121313u, t.dst4);
}
@@ -1542,10 +1542,10 @@ TEST(udiv) {
#endif
F3 f = FUNCTION_CAST<F3>(code->entry());
Object* dummy;
- TEST_UDIV(0, 0, 0);
- TEST_UDIV(0, 1024, 0);
- TEST_UDIV(5, 10, 2);
- TEST_UDIV(3, 10, 3);
+ TEST_UDIV(0u, 0, 0);
+ TEST_UDIV(0u, 1024, 0);
+ TEST_UDIV(5u, 10, 2);
+ TEST_UDIV(3u, 10, 3);
USE(dummy);
}
}
@@ -1885,6 +1885,172 @@ TEST(code_relative_offset) {
}
+TEST(jump_tables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ stm(db_w, sp, lr.bit());
+
+ Label done;
+ __ BlockConstPoolFor(kNumCases + 2);
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases + 2) * Assembler::kInstrSize);
+ __ ldr(pc, MemOperand(pc, r0, LSL, 2));
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ mov(r0, Operand(values[i]));
+ __ b(&done);
+ }
+
+ __ bind(&done);
+ __ ldm(ia_w, sp, pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(jump_tables2) {
+ // Test jump tables with backward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ stm(db_w, sp, lr.bit());
+
+ Label done, dispatch;
+ __ b(&dispatch);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ mov(r0, Operand(values[i]));
+ __ b(&done);
+ }
+
+ __ bind(&dispatch);
+ __ BlockConstPoolFor(kNumCases + 2);
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases + 2) * Assembler::kInstrSize);
+ __ ldr(pc, MemOperand(pc, r0, LSL, 2));
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ ldm(ia_w, sp, pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(jump_tables3) {
+ // Test jump tables with backward jumps and embedded heap objects.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 256;
+ Handle<Object> values[kNumCases];
+ for (int i = 0; i < kNumCases; ++i) {
+ double value = isolate->random_number_generator()->NextDouble();
+ values[i] = isolate->factory()->NewHeapNumber(value, IMMUTABLE, TENURED);
+ }
+ Label labels[kNumCases];
+
+ __ stm(db_w, sp, lr.bit());
+
+ Label done, dispatch;
+ __ b(&dispatch);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ mov(r0, Operand(values[i]));
+ __ b(&done);
+ }
+
+ __ bind(&dispatch);
+ __ BlockConstPoolFor(kNumCases + 2);
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases + 2) * Assembler::kInstrSize);
+ __ ldr(pc, MemOperand(pc, r0, LSL, 2));
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ ldm(ia_w, sp, pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ Handle<Object> result(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0), isolate);
+#ifdef OBJECT_PRINT
+ ::printf("f(%d) = ", i);
+ result->Print(std::cout);
+ ::printf("\n");
+#endif
+ CHECK(values[i].is_identical_to(result));
+ }
+}
+
+
TEST(ARMv8_vrintX) {
// Test the vrintX floating point instructions.
CcTest::InitializeVM();
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 108152efc2..df8477ba9f 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -3766,17 +3766,17 @@ TEST(add_sub_zero) {
__ Add(x0, x0, 0);
__ Sub(x1, x1, 0);
__ Sub(x2, x2, xzr);
- CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));
+ CHECK_EQ(0u, __ SizeOfCodeGeneratedSince(&blob1));
Label blob2;
__ Bind(&blob2);
__ Add(w3, w3, 0);
- CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));
+ CHECK_NE(0u, __ SizeOfCodeGeneratedSince(&blob2));
Label blob3;
__ Bind(&blob3);
__ Sub(w3, w3, wzr);
- CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));
+ CHECK_NE(0u, __ SizeOfCodeGeneratedSince(&blob3));
END();
@@ -3810,7 +3810,7 @@ TEST(claim_drop_zero) {
__ DropBySMI(xzr, 8);
__ ClaimBySMI(xzr, 0);
__ DropBySMI(xzr, 0);
- CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));
+ CHECK_EQ(0u, __ SizeOfCodeGeneratedSince(&start));
END();
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index f59c3c4aa1..46592a05d1 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -354,7 +354,7 @@ TEST(AssemblerIa329) {
CHECK_EQ(kLess, f(1.1, 2.2));
CHECK_EQ(kEqual, f(2.2, 2.2));
CHECK_EQ(kGreater, f(3.3, 2.2));
- CHECK_EQ(kNaN, f(v8::base::OS::nan_value(), 1.1));
+ CHECK_EQ(kNaN, f(std::numeric_limits<double>::quiet_NaN(), 1.1));
}
@@ -438,7 +438,7 @@ TEST(AssemblerMultiByteNop) {
#ifdef __GNUC__
-#define ELEMENT_COUNT 4
+#define ELEMENT_COUNT 4u
void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
@@ -455,7 +455,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
__ pop(ecx);
// Store input vector on the stack.
- for (int i = 0; i < ELEMENT_COUNT; ++i) {
+ for (unsigned i = 0; i < ELEMENT_COUNT; ++i) {
__ push(Immediate(vec->Get(i)->Int32Value()));
}
@@ -507,7 +507,7 @@ TEST(StackAlignmentForSSE2) {
int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
v8::Local<v8::Array> v8_vec = v8::Array::New(isolate, ELEMENT_COUNT);
- for (int i = 0; i < ELEMENT_COUNT; i++) {
+ for (unsigned i = 0; i < ELEMENT_COUNT; i++) {
v8_vec->Set(i, v8_num(vec[i]));
}
@@ -549,7 +549,7 @@ TEST(AssemblerIa32Extractps) {
uint64_t value1 = V8_2PART_UINT64_C(0x12345678, 87654321);
CHECK_EQ(0x12345678, f(uint64_to_double(value1)));
uint64_t value2 = V8_2PART_UINT64_C(0x87654321, 12345678);
- CHECK_EQ(0x87654321, f(uint64_to_double(value2)));
+ CHECK_EQ(static_cast<int>(0x87654321), f(uint64_to_double(value2)));
}
@@ -1043,4 +1043,100 @@ TEST(AssemblerX64FMA_ss) {
F10 f = FUNCTION_CAST<F10>(code->entry());
CHECK_EQ(0, f(9.26621069e-05f, -2.4607749f, -1.09587872f));
}
+
+
+TEST(AssemblerIa32JumpTables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ Label done, table;
+ __ mov(eax, Operand(esp, 4));
+ __ jmp(Operand::JumpTable(eax, times_4, &table));
+ __ ud2();
+ __ bind(&table);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ mov(eax, Immediate(values[i]));
+ __ jmp(&done);
+ }
+
+ __ bind(&done);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = f(i);
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(AssemblerIa32JumpTables2) {
+ // Test jump tables with backward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ Label done, table;
+ __ mov(eax, Operand(esp, 4));
+ __ jmp(Operand::JumpTable(eax, times_4, &table));
+ __ ud2();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ mov(eax, Immediate(values[i]));
+ __ jmp(&done);
+ }
+
+ __ bind(&table);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+
+ __ bind(&done);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = f(i);
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 74dcc3a0a2..730de9d454 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <iostream> // NOLINT(readability/streams)
+
#include "src/v8.h"
#include "src/disassembler.h"
@@ -35,6 +37,7 @@
#include "test/cctest/cctest.h"
+
using namespace v8::internal;
@@ -65,8 +68,7 @@ TEST(MIPS0) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
- ::printf("f() = %d\n", res);
- CHECK_EQ(0xabc, res);
+ CHECK_EQ(static_cast<int32_t>(0xabc), res);
}
@@ -101,7 +103,6 @@ TEST(MIPS1) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F1 f = FUNCTION_CAST<F1>(code->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
- ::printf("f() = %d\n", res);
CHECK_EQ(1275, res);
}
@@ -239,8 +240,7 @@ TEST(MIPS2) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
- ::printf("f() = %d\n", res);
- CHECK_EQ(0x31415926, res);
+ CHECK_EQ(static_cast<int32_t>(0x31415926), res);
}
@@ -523,19 +523,19 @@ TEST(MIPS6) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x11223344, t.r1);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
#if __BYTE_ORDER == __LITTLE_ENDIAN
- CHECK_EQ(0x3344, t.r2);
- CHECK_EQ(0xffffbbcc, t.r3);
- CHECK_EQ(0x0000bbcc, t.r4);
- CHECK_EQ(0xffffffcc, t.r5);
- CHECK_EQ(0x3333bbcc, t.r6);
+ CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
#elif __BYTE_ORDER == __BIG_ENDIAN
- CHECK_EQ(0x1122, t.r2);
- CHECK_EQ(0xffff99aa, t.r3);
- CHECK_EQ(0x000099aa, t.r4);
- CHECK_EQ(0xffffff99, t.r5);
- CHECK_EQ(0x99aa3333, t.r6);
+ CHECK_EQ(static_cast<int32_t>(0x1122), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0xffff99aa), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x000099aa), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xffffff99), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x99aa3333), t.r6);
#else
#error Unknown endianness
#endif
@@ -710,21 +710,21 @@ TEST(MIPS8) {
t.input = 0x12345678;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x81234567, t.result_rotr_4);
- CHECK_EQ(0x78123456, t.result_rotr_8);
- CHECK_EQ(0x67812345, t.result_rotr_12);
- CHECK_EQ(0x56781234, t.result_rotr_16);
- CHECK_EQ(0x45678123, t.result_rotr_20);
- CHECK_EQ(0x34567812, t.result_rotr_24);
- CHECK_EQ(0x23456781, t.result_rotr_28);
-
- CHECK_EQ(0x81234567, t.result_rotrv_4);
- CHECK_EQ(0x78123456, t.result_rotrv_8);
- CHECK_EQ(0x67812345, t.result_rotrv_12);
- CHECK_EQ(0x56781234, t.result_rotrv_16);
- CHECK_EQ(0x45678123, t.result_rotrv_20);
- CHECK_EQ(0x34567812, t.result_rotrv_24);
- CHECK_EQ(0x23456781, t.result_rotrv_28);
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
+
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotrv_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotrv_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotrv_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotrv_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotrv_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotrv_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotrv_28);
}
@@ -809,9 +809,9 @@ TEST(MIPS10) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x41DFFFFF, t.dbl_exp);
- CHECK_EQ(0xFF800000, t.dbl_mant);
- CHECK_EQ(0X7FFFFFFE, t.word);
+ CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
+ CHECK_EQ(static_cast<int32_t>(0xFF800000), t.dbl_mant);
+ CHECK_EQ(static_cast<int32_t>(0x7FFFFFFE), t.word);
// 0x0FF00FF0 -> 2.6739096+e08
CHECK_EQ(2.6739096e08, t.b);
}
@@ -940,45 +940,45 @@ TEST(MIPS11) {
USE(dummy);
#if __BYTE_ORDER == __LITTLE_ENDIAN
- CHECK_EQ(0x44bbccdd, t.lwl_0);
- CHECK_EQ(0x3344ccdd, t.lwl_1);
- CHECK_EQ(0x223344dd, t.lwl_2);
- CHECK_EQ(0x11223344, t.lwl_3);
-
- CHECK_EQ(0x11223344, t.lwr_0);
- CHECK_EQ(0xaa112233, t.lwr_1);
- CHECK_EQ(0xaabb1122, t.lwr_2);
- CHECK_EQ(0xaabbcc11, t.lwr_3);
-
- CHECK_EQ(0x112233aa, t.swl_0);
- CHECK_EQ(0x1122aabb, t.swl_1);
- CHECK_EQ(0x11aabbcc, t.swl_2);
- CHECK_EQ(0xaabbccdd, t.swl_3);
-
- CHECK_EQ(0xaabbccdd, t.swr_0);
- CHECK_EQ(0xbbccdd44, t.swr_1);
- CHECK_EQ(0xccdd3344, t.swr_2);
- CHECK_EQ(0xdd223344, t.swr_3);
+ CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
+ CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
+ CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
#elif __BYTE_ORDER == __BIG_ENDIAN
- CHECK_EQ(0x11223344, t.lwl_0);
- CHECK_EQ(0x223344dd, t.lwl_1);
- CHECK_EQ(0x3344ccdd, t.lwl_2);
- CHECK_EQ(0x44bbccdd, t.lwl_3);
-
- CHECK_EQ(0xaabbcc11, t.lwr_0);
- CHECK_EQ(0xaabb1122, t.lwr_1);
- CHECK_EQ(0xaa112233, t.lwr_2);
- CHECK_EQ(0x11223344, t.lwr_3);
-
- CHECK_EQ(0xaabbccdd, t.swl_0);
- CHECK_EQ(0x11aabbcc, t.swl_1);
- CHECK_EQ(0x1122aabb, t.swl_2);
- CHECK_EQ(0x112233aa, t.swl_3);
-
- CHECK_EQ(0xdd223344, t.swr_0);
- CHECK_EQ(0xccdd3344, t.swr_1);
- CHECK_EQ(0xbbccdd44, t.swr_2);
- CHECK_EQ(0xaabbccdd, t.swr_3);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_0);
+ CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_0);
+ CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_3);
#else
#error Unknown endianness
#endif
@@ -1245,12 +1245,12 @@ TEST(MIPS14) {
USE(dummy);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
-#define CHECK_ROUND_RESULT(type) \
- CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
- CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
+#define CHECK_ROUND_RESULT(type) \
+ CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
+ CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
- CHECK_EQ(kFPUInvalidResult, t.type##_invalid_result);
+ CHECK_EQ(kFPUInvalidResult, static_cast<uint>(t.type##_invalid_result));
CHECK_ROUND_RESULT(round);
CHECK_ROUND_RESULT(floor);
@@ -1275,4 +1275,216 @@ TEST(MIPS15) {
__ nop();
}
+
+TEST(jump_tables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ addiu(sp, sp, -4);
+ __ sw(ra, MemOperand(sp));
+
+ Label done;
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases + 7) * Assembler::kInstrSize);
+ Label here;
+
+ __ bal(&here);
+ __ nop();
+ __ bind(&here);
+ __ sll(at, a0, 2);
+ __ addu(at, at, ra);
+ __ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ jr(at);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lui(v0, (values[i] >> 16) & 0xffff);
+ __ ori(v0, v0, values[i] & 0xffff);
+ __ b(&done);
+ __ nop();
+ }
+
+ __ bind(&done);
+ __ lw(ra, MemOperand(sp));
+ __ addiu(sp, sp, 4);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(jump_tables2) {
+ // Test jump tables with backward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ addiu(sp, sp, -4);
+ __ sw(ra, MemOperand(sp));
+
+ Label done, dispatch;
+ __ b(&dispatch);
+ __ nop();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lui(v0, (values[i] >> 16) & 0xffff);
+ __ ori(v0, v0, values[i] & 0xffff);
+ __ b(&done);
+ __ nop();
+ }
+
+ __ bind(&dispatch);
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases + 7) * Assembler::kInstrSize);
+ Label here;
+
+ __ bal(&here);
+ __ nop();
+ __ bind(&here);
+ __ sll(at, a0, 2);
+ __ addu(at, at, ra);
+ __ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ jr(at);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ lw(ra, MemOperand(sp));
+ __ addiu(sp, sp, 4);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(jump_tables3) {
+ // Test jump tables with backward jumps and embedded heap objects.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 256;
+ Handle<Object> values[kNumCases];
+ for (int i = 0; i < kNumCases; ++i) {
+ double value = isolate->random_number_generator()->NextDouble();
+ values[i] = isolate->factory()->NewHeapNumber(value, IMMUTABLE, TENURED);
+ }
+ Label labels[kNumCases];
+ Object* obj;
+ int32_t imm32;
+
+ __ addiu(sp, sp, -4);
+ __ sw(ra, MemOperand(sp));
+
+ Label done, dispatch;
+ __ b(&dispatch);
+
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ obj = *values[i];
+ imm32 = reinterpret_cast<intptr_t>(obj);
+ __ lui(v0, (imm32 >> 16) & 0xffff);
+ __ ori(v0, v0, imm32 & 0xffff);
+ __ b(&done);
+ __ nop();
+ }
+
+ __ bind(&dispatch);
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases + 7) * Assembler::kInstrSize);
+ Label here;
+
+ __ bal(&here);
+ __ nop();
+ __ bind(&here);
+ __ sll(at, a0, 2);
+ __ addu(at, at, ra);
+ __ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ jr(at);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ lw(ra, MemOperand(sp));
+ __ addiu(sp, sp, 4);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ Handle<Object> result(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0), isolate);
+#ifdef OBJECT_PRINT
+ ::printf("f(%d) = ", i);
+ result->Print(std::cout);
+ ::printf("\n");
+#endif
+ CHECK(values[i].is_identical_to(result));
+ }
+}
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 1ec9a65c96..d4cabbcf72 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <iostream> // NOLINT(readability/streams)
+
#include "src/v8.h"
#include "src/disassembler.h"
@@ -66,7 +68,6 @@ TEST(MIPS0) {
F2 f = FUNCTION_CAST<F2>(code->entry());
int64_t res =
reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
- ::printf("f() = %ld\n", res);
CHECK_EQ(0xabcL, res);
}
@@ -103,7 +104,6 @@ TEST(MIPS1) {
F1 f = FUNCTION_CAST<F1>(code->entry());
int64_t res =
reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
- ::printf("f() = %ld\n", res);
CHECK_EQ(1275L, res);
}
@@ -250,7 +250,6 @@ TEST(MIPS2) {
F2 f = FUNCTION_CAST<F2>(code->entry());
int64_t res =
reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
- ::printf("f() = %ld\n", res);
CHECK_EQ(0x31415926L, res);
}
@@ -407,8 +406,8 @@ TEST(MIPS4) {
CHECK_EQ(2.75e11, t.a);
CHECK_EQ(2.75e11, t.b);
CHECK_EQ(1.5e22, t.c);
- CHECK_EQ(0xffffffffc25001d1L, t.high);
- CHECK_EQ(0xffffffffbf800000L, t.low);
+ CHECK_EQ(static_cast<int64_t>(0xffffffffc25001d1L), t.high);
+ CHECK_EQ(static_cast<int64_t>(0xffffffffbf800000L), t.low);
}
@@ -538,12 +537,12 @@ TEST(MIPS6) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x11223344, t.r1);
- CHECK_EQ(0x3344, t.r2);
- CHECK_EQ(0xffffbbcc, t.r3);
- CHECK_EQ(0x0000bbcc, t.r4);
- CHECK_EQ(0xffffffcc, t.r5);
- CHECK_EQ(0x3333bbcc, t.r6);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
+ CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
}
@@ -712,21 +711,21 @@ TEST(MIPS8) {
t.input = 0x12345678;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x81234567, t.result_rotr_4);
- CHECK_EQ(0x78123456, t.result_rotr_8);
- CHECK_EQ(0x67812345, t.result_rotr_12);
- CHECK_EQ(0x56781234, t.result_rotr_16);
- CHECK_EQ(0x45678123, t.result_rotr_20);
- CHECK_EQ(0x34567812, t.result_rotr_24);
- CHECK_EQ(0x23456781, t.result_rotr_28);
-
- CHECK_EQ(0x81234567, t.result_rotrv_4);
- CHECK_EQ(0x78123456, t.result_rotrv_8);
- CHECK_EQ(0x67812345, t.result_rotrv_12);
- CHECK_EQ(0x56781234, t.result_rotrv_16);
- CHECK_EQ(0x45678123, t.result_rotrv_20);
- CHECK_EQ(0x34567812, t.result_rotrv_24);
- CHECK_EQ(0x23456781, t.result_rotrv_28);
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
+
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotrv_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotrv_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotrv_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotrv_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotrv_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotrv_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotrv_28);
}
@@ -838,15 +837,15 @@ TEST(MIPS10) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x41DFFFFF, t.dbl_exp);
- CHECK_EQ(0xFFC00000, t.dbl_mant);
+ CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
+ CHECK_EQ(static_cast<int32_t>(0xFFC00000), t.dbl_mant);
CHECK_EQ(0, t.long_hi);
- CHECK_EQ(0x7fffffff, t.long_lo);
+ CHECK_EQ(static_cast<int32_t>(0x7fffffff), t.long_lo);
CHECK_EQ(2.147483647e9, t.a_converted);
// 0xFF00FF00FF -> 1.095233372415e12.
CHECK_EQ(1.095233372415e12, t.b);
- CHECK_EQ(0xFF00FF00FF, t.b_long_as_int64);
+ CHECK_EQ(static_cast<int64_t>(0xFF00FF00FF), t.b_long_as_int64);
}
}
@@ -973,25 +972,25 @@ TEST(MIPS11) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(0x44bbccdd, t.lwl_0);
- CHECK_EQ(0x3344ccdd, t.lwl_1);
- CHECK_EQ(0x223344dd, t.lwl_2);
- CHECK_EQ(0x11223344, t.lwl_3);
-
- CHECK_EQ(0x11223344, t.lwr_0);
- CHECK_EQ(0xaa112233, t.lwr_1);
- CHECK_EQ(0xaabb1122, t.lwr_2);
- CHECK_EQ(0xaabbcc11, t.lwr_3);
-
- CHECK_EQ(0x112233aa, t.swl_0);
- CHECK_EQ(0x1122aabb, t.swl_1);
- CHECK_EQ(0x11aabbcc, t.swl_2);
- CHECK_EQ(0xaabbccdd, t.swl_3);
-
- CHECK_EQ(0xaabbccdd, t.swr_0);
- CHECK_EQ(0xbbccdd44, t.swr_1);
- CHECK_EQ(0xccdd3344, t.swr_2);
- CHECK_EQ(0xdd223344, t.swr_3);
+ CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
+ CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
+ CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
}
}
@@ -1374,16 +1373,240 @@ TEST(MIPS16) {
USE(dummy);
// Unsigned data, 32 & 64.
- CHECK_EQ(0x1111111144332211L, t.r1);
- CHECK_EQ(0x0000000000002211L, t.r2);
+ CHECK_EQ(static_cast<int64_t>(0x1111111144332211L), t.r1);
+ CHECK_EQ(static_cast<int64_t>(0x0000000000002211L), t.r2);
// Signed data, 32 & 64.
- CHECK_EQ(0x33333333ffffbbccL, t.r3);
- CHECK_EQ(0xffffffff0000bbccL, t.r4);
+ CHECK_EQ(static_cast<int64_t>(0x33333333ffffbbccL), t.r3);
+ CHECK_EQ(static_cast<int64_t>(0xffffffff0000bbccL), t.r4);
// Signed data, 32 & 64.
- CHECK_EQ(0x55555555ffffffccL, t.r5);
- CHECK_EQ(0x000000003333bbccL, t.r6);
+ CHECK_EQ(static_cast<int64_t>(0x55555555ffffffccL), t.r5);
+ CHECK_EQ(static_cast<int64_t>(0x000000003333bbccL), t.r6);
+}
+
+
+TEST(jump_tables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ daddiu(sp, sp, -8);
+ __ sd(ra, MemOperand(sp));
+ if ((assm.pc_offset() & 7) == 0) {
+ __ nop();
+ }
+
+ Label done;
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
+ Label here;
+
+ __ bal(&here);
+ __ nop();
+ __ bind(&here);
+ __ dsll(at, a0, 3);
+ __ daddu(at, at, ra);
+ __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ jr(at);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lui(v0, (values[i] >> 16) & 0xffff);
+ __ ori(v0, v0, values[i] & 0xffff);
+ __ b(&done);
+ __ nop();
+ }
+
+ __ bind(&done);
+ __ ld(ra, MemOperand(sp));
+ __ daddiu(sp, sp, 8);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], static_cast<int>(res));
+ }
}
+
+TEST(jump_tables2) {
+ // Test jump tables with backward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ daddiu(sp, sp, -8);
+ __ sd(ra, MemOperand(sp));
+
+ Label done, dispatch;
+ __ b(&dispatch);
+ __ nop();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lui(v0, (values[i] >> 16) & 0xffff);
+ __ ori(v0, v0, values[i] & 0xffff);
+ __ b(&done);
+ __ nop();
+ }
+
+ if ((assm.pc_offset() & 7) == 0) {
+ __ nop();
+ }
+ __ bind(&dispatch);
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
+ Label here;
+
+ __ bal(&here);
+ __ nop();
+ __ bind(&here);
+ __ dsll(at, a0, 3);
+ __ daddu(at, at, ra);
+ __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ jr(at);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ ld(ra, MemOperand(sp));
+ __ daddiu(sp, sp, 8);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(jump_tables3) {
+ // Test jump tables with backward jumps and embedded heap objects.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ Handle<Object> values[kNumCases];
+ for (int i = 0; i < kNumCases; ++i) {
+ double value = isolate->random_number_generator()->NextDouble();
+ values[i] = isolate->factory()->NewHeapNumber(value, IMMUTABLE, TENURED);
+ }
+ Label labels[kNumCases];
+ Object* obj;
+ int64_t imm64;
+
+ __ daddiu(sp, sp, -8);
+ __ sd(ra, MemOperand(sp));
+
+ Label done, dispatch;
+ __ b(&dispatch);
+
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ obj = *values[i];
+ imm64 = reinterpret_cast<intptr_t>(obj);
+ __ lui(v0, (imm64 >> 32) & kImm16Mask);
+ __ ori(v0, v0, (imm64 >> 16) & kImm16Mask);
+ __ dsll(v0, v0, 16);
+ __ ori(v0, v0, imm64 & kImm16Mask);
+ __ b(&done);
+ __ nop();
+ }
+
+ __ stop("chk");
+ if ((assm.pc_offset() & 7) == 0) {
+ __ nop();
+ }
+ __ bind(&dispatch);
+ {
+ PredictableCodeSizeScope predictable(
+ &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
+ Label here;
+
+ __ bal(&here);
+ __ nop();
+ __ bind(&here);
+ __ dsll(at, a0, 3);
+ __ daddu(at, at, ra);
+ __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ jr(at);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ ld(ra, MemOperand(sp));
+ __ addiu(sp, sp, 8);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+// code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ Handle<Object> result(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0), isolate);
+#ifdef OBJECT_PRINT
+ ::printf("f(%d) = ", i);
+ result->Print(std::cout);
+ ::printf("\n");
+#endif
+ CHECK(values[i].is_identical_to(result));
+ }
+}
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
new file mode 100644
index 0000000000..4a2e7d3983
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -0,0 +1,1060 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/v8.h"
+
+#include "src/disassembler.h"
+#include "src/factory.h"
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/ppc/simulator-ppc.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
+typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
+
+
+#define __ assm.
+
+// Simple add parameter 1 to parameter 2 and return
+TEST(0) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ __ function_descriptor();
+
+ __ add(r3, r3, r4);
+ __ blr();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(7, static_cast<int>(res));
+}
+
+
+// Loop 100 times, adding loop counter to result
+TEST(1) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ __ function_descriptor();
+
+ __ mr(r4, r3);
+ __ li(r3, Operand::Zero());
+ __ b(&C);
+
+ __ bind(&L);
+ __ add(r3, r3, r4);
+ __ subi(r4, r4, Operand(1));
+
+ __ bind(&C);
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&L);
+ __ blr();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 100, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(5050, static_cast<int>(res));
+}
+
+
+TEST(2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+ __ function_descriptor();
+
+ __ mr(r4, r3);
+ __ li(r3, Operand(1));
+ __ b(&C);
+
+ __ bind(&L);
+#if defined(V8_TARGET_ARCH_PPC64)
+ __ mulld(r3, r4, r3);
+#else
+ __ mullw(r3, r4, r3);
+#endif
+ __ subi(r4, r4, Operand(1));
+
+ __ bind(&C);
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&L);
+ __ blr();
+
+ // some relocated stuff here, not executed
+ __ RecordComment("dead code, just testing relocations");
+ __ mov(r0, Operand(isolate->factory()->true_value()));
+ __ RecordComment("dead code, just testing immediate operands");
+ __ mov(r0, Operand(-1));
+ __ mov(r0, Operand(0xFF000000));
+ __ mov(r0, Operand(0xF0F0F0F0));
+ __ mov(r0, Operand(0xFFF0FFFF));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 10, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(3628800, static_cast<int>(res));
+}
+
+
+TEST(3) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int i;
+ char c;
+ int16_t s;
+ } T;
+ T t;
+
+ Assembler assm(Isolate::Current(), NULL, 0);
+ Label L, C;
+
+ __ function_descriptor();
+
+// build a frame
+#if V8_TARGET_ARCH_PPC64
+ __ stdu(sp, MemOperand(sp, -32));
+ __ std(fp, MemOperand(sp, 24));
+#else
+ __ stwu(sp, MemOperand(sp, -16));
+ __ stw(fp, MemOperand(sp, 12));
+#endif
+ __ mr(fp, sp);
+
+ // r4 points to our struct
+ __ mr(r4, r3);
+
+ // modify field int i of struct
+ __ lwz(r3, MemOperand(r4, OFFSET_OF(T, i)));
+ __ srwi(r5, r3, Operand(1));
+ __ stw(r5, MemOperand(r4, OFFSET_OF(T, i)));
+
+ // modify field char c of struct
+ __ lbz(r5, MemOperand(r4, OFFSET_OF(T, c)));
+ __ add(r3, r5, r3);
+ __ slwi(r5, r5, Operand(2));
+ __ stb(r5, MemOperand(r4, OFFSET_OF(T, c)));
+
+ // modify field int16_t s of struct
+ __ lhz(r5, MemOperand(r4, OFFSET_OF(T, s)));
+ __ add(r3, r5, r3);
+ __ srwi(r5, r5, Operand(3));
+ __ sth(r5, MemOperand(r4, OFFSET_OF(T, s)));
+
+// restore frame
+#if V8_TARGET_ARCH_PPC64
+ __ addi(r11, fp, Operand(32));
+ __ ld(fp, MemOperand(r11, -8));
+#else
+ __ addi(r11, fp, Operand(16));
+ __ lwz(fp, MemOperand(r11, -4));
+#endif
+ __ mr(sp, r11);
+ __ blr();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ code->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ t.i = 100000;
+ t.c = 10;
+ t.s = 1000;
+ intptr_t res =
+ reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(101010, static_cast<int>(res));
+ CHECK_EQ(100000 / 2, t.i);
+ CHECK_EQ(10 * 4, t.c);
+ CHECK_EQ(1000 / 8, t.s);
+}
+
+#if 0
+TEST(4) {
+ // Test the VFP floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ int i;
+ double m;
+ double n;
+ float x;
+ float y;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles and floats.
+ Assembler assm(Isolate::Current(), NULL, 0);
+ Label L, C;
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ mov(r4, Operand(r0));
+ __ vldr(d6, r4, OFFSET_OF(T, a));
+ __ vldr(d7, r4, OFFSET_OF(T, b));
+ __ vadd(d5, d6, d7);
+ __ vstr(d5, r4, OFFSET_OF(T, c));
+
+ __ vmov(r2, r3, d5);
+ __ vmov(d4, r2, r3);
+ __ vstr(d4, r4, OFFSET_OF(T, b));
+
+ // Load t.x and t.y, switch values, and store back to the struct.
+ __ vldr(s0, r4, OFFSET_OF(T, x));
+ __ vldr(s31, r4, OFFSET_OF(T, y));
+ __ vmov(s16, s0);
+ __ vmov(s0, s31);
+ __ vmov(s31, s16);
+ __ vstr(s0, r4, OFFSET_OF(T, x));
+ __ vstr(s31, r4, OFFSET_OF(T, y));
+
+ // Move a literal into a register that can be encoded in the instruction.
+ __ vmov(d4, 1.0);
+ __ vstr(d4, r4, OFFSET_OF(T, e));
+
+ // Move a literal into a register that requires 64 bits to encode.
+ // 0x3ff0000010000000 = 1.000000059604644775390625
+ __ vmov(d4, 1.000000059604644775390625);
+ __ vstr(d4, r4, OFFSET_OF(T, d));
+
+ // Convert from floating point to integer.
+ __ vmov(d4, 2.0);
+ __ vcvt_s32_f64(s31, d4);
+ __ vstr(s31, r4, OFFSET_OF(T, i));
+
+ // Convert from integer to floating point.
+ __ mov(lr, Operand(42));
+ __ vmov(s31, lr);
+ __ vcvt_f64_s32(d4, s31);
+ __ vstr(d4, r4, OFFSET_OF(T, f));
+
+ // Test vabs.
+ __ vldr(d1, r4, OFFSET_OF(T, g));
+ __ vabs(d0, d1);
+ __ vstr(d0, r4, OFFSET_OF(T, g));
+ __ vldr(d2, r4, OFFSET_OF(T, h));
+ __ vabs(d0, d2);
+ __ vstr(d0, r4, OFFSET_OF(T, h));
+
+ // Test vneg.
+ __ vldr(d1, r4, OFFSET_OF(T, m));
+ __ vneg(d0, d1);
+ __ vstr(d0, r4, OFFSET_OF(T, m));
+ __ vldr(d1, r4, OFFSET_OF(T, n));
+ __ vneg(d0, d1);
+ __ vstr(d0, r4, OFFSET_OF(T, n));
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5;
+ t.b = 2.75;
+ t.c = 17.17;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.g = -2718.2818;
+ t.h = 31415926.5;
+ t.i = 0;
+ t.m = -2718.2818;
+ t.n = 123.456;
+ t.x = 4.5;
+ t.y = 9.0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(4.5, t.y);
+ CHECK_EQ(9.0, t.x);
+ CHECK_EQ(-123.456, t.n);
+ CHECK_EQ(2718.2818, t.m);
+ CHECK_EQ(2, t.i);
+ CHECK_EQ(2718.2818, t.g);
+ CHECK_EQ(31415926.5, t.h);
+ CHECK_EQ(42.0, t.f);
+ CHECK_EQ(1.0, t.e);
+ CHECK_EQ(1.000000059604644775390625, t.d);
+ CHECK_EQ(4.25, t.c);
+ CHECK_EQ(4.25, t.b);
+ CHECK_EQ(1.5, t.a);
+ }
+}
+
+
+TEST(5) {
+ // Test the ARMv7 bitfield instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatures::Scope scope(ARMv7);
+ // On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
+ __ ubfx(r0, r0, 1, 12); // 0b00..010101010101 = 0x555
+ __ sbfx(r0, r0, 0, 5); // 0b11..111111110101 = -11
+ __ bfc(r0, 1, 3); // 0b11..111111110001 = -15
+ __ mov(r1, Operand(7));
+ __ bfi(r0, r1, 3, 3); // 0b11..111111111001 = -7
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(-7, res);
+ }
+}
+
+
+TEST(6) {
+ // Test saturating instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatures::Scope scope(ARMv7);
+ __ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
+ __ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
+ __ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0.
+ __ addi(r0, r1, Operand(r2));
+ __ addi(r0, r0, Operand(r3));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(382, res);
+ }
+}
+
+enum VCVTTypes {
+ s32_f64,
+ u32_f64
+};
+
+static void TestRoundingMode(VCVTTypes types,
+ VFPRoundingMode mode,
+ double value,
+ int expected,
+ bool expected_exception = false) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ Label wrong_exception;
+
+ __ vmrs(r1);
+ // Set custom FPSCR.
+ __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
+ __ orr(r2, r2, Operand(mode));
+ __ vmsr(r2);
+
+ // Load value, convert, and move back result to r0 if everything went well.
+ __ vmov(d1, value);
+ switch (types) {
+ case s32_f64:
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ case u32_f64:
+ __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Check for vfp exceptions
+ __ vmrs(r2);
+ __ tst(r2, Operand(kVFPExceptionMask));
+ // Check that we behaved as expected.
+ __ b(&wrong_exception,
+ expected_exception ? eq : ne);
+ // There was no exception. Retrieve the result and return.
+ __ vmov(r0, s0);
+ __ mov(pc, Operand(lr));
+
+ // The exception behaviour is not what we expected.
+ // Load a special value and return.
+ __ bind(&wrong_exception);
+ __ mov(r0, Operand(11223344));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ ::printf("res = %d\n", res);
+ CHECK_EQ(expected, res);
+ }
+}
+
+
+TEST(7) {
+ // Test vfp rounding modes.
+
+ // s32_f64 (double to integer).
+
+ TestRoundingMode(s32_f64, RN, 0, 0);
+ TestRoundingMode(s32_f64, RN, 0.5, 0);
+ TestRoundingMode(s32_f64, RN, -0.5, 0);
+ TestRoundingMode(s32_f64, RN, 1.5, 2);
+ TestRoundingMode(s32_f64, RN, -1.5, -2);
+ TestRoundingMode(s32_f64, RN, 123.7, 124);
+ TestRoundingMode(s32_f64, RN, -123.7, -124);
+ TestRoundingMode(s32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RN, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);
+
+ TestRoundingMode(s32_f64, RM, 0, 0);
+ TestRoundingMode(s32_f64, RM, 0.5, 0);
+ TestRoundingMode(s32_f64, RM, -0.5, -1);
+ TestRoundingMode(s32_f64, RM, 123.7, 123);
+ TestRoundingMode(s32_f64, RM, -123.7, -124);
+ TestRoundingMode(s32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RM, -123456.2, -123457);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
+ TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);
+
+ TestRoundingMode(s32_f64, RZ, 0, 0);
+ TestRoundingMode(s32_f64, RZ, 0.5, 0);
+ TestRoundingMode(s32_f64, RZ, -0.5, 0);
+ TestRoundingMode(s32_f64, RZ, 123.7, 123);
+ TestRoundingMode(s32_f64, RZ, -123.7, -123);
+ TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);
+
+
+ // u32_f64 (double to integer).
+
+ // Negative values.
+ TestRoundingMode(u32_f64, RN, -0.5, 0);
+ TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RM, -0.5, 0, true);
+ TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RZ, -0.5, 0);
+ TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
+
+ // Positive values.
+ // kMaxInt is the maximum *signed* integer: 0x7fffffff.
+ static const uint32_t kMaxUInt = 0xffffffffu;
+ TestRoundingMode(u32_f64, RZ, 0, 0);
+ TestRoundingMode(u32_f64, RZ, 0.5, 0);
+ TestRoundingMode(u32_f64, RZ, 123.7, 123);
+ TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RM, 0, 0);
+ TestRoundingMode(u32_f64, RM, 0.5, 0);
+ TestRoundingMode(u32_f64, RM, 123.7, 123);
+ TestRoundingMode(u32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RN, 0, 0);
+ TestRoundingMode(u32_f64, RN, 0.5, 0);
+ TestRoundingMode(u32_f64, RN, 1.5, 2);
+ TestRoundingMode(u32_f64, RN, 123.7, 124);
+ TestRoundingMode(u32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
+}
+
+
+TEST(8) {
+ // Test VFP multi load/store with ia_w.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ } D;
+ D d;
+
+ typedef struct {
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+ float f;
+ float g;
+ float h;
+ } F;
+ F f;
+
+ // Create a function that uses vldm/vstm to move some double and
+ // single precision values around in memory.
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vldm(ia_w, r4, d0, d3);
+ __ vldm(ia_w, r4, d4, d7);
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vstm(ia_w, r4, d6, d7);
+ __ vstm(ia_w, r4, d0, d5);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vldm(ia_w, r4, s0, s3);
+ __ vldm(ia_w, r4, s4, s7);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vstm(ia_w, r4, s6, s7);
+ __ vstm(ia_w, r4, s0, s5);
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
+ }
+}
+
+
+TEST(9) {
+ // Test VFP multi load/store with ia.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ } D;
+ D d;
+
+ typedef struct {
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+ float f;
+ float g;
+ float h;
+ } F;
+ F f;
+
+ // Create a function that uses vldm/vstm to move some double and
+ // single precision values around in memory.
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vldm(ia, r4, d0, d3);
+ __ addi(r4, r4, Operand(4 * 8));
+ __ vldm(ia, r4, d4, d7);
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vstm(ia, r4, d6, d7);
+ __ addi(r4, r4, Operand(2 * 8));
+ __ vstm(ia, r4, d0, d5);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vldm(ia, r4, s0, s3);
+ __ addi(r4, r4, Operand(4 * 4));
+ __ vldm(ia, r4, s4, s7);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vstm(ia, r4, s6, s7);
+ __ addi(r4, r4, Operand(2 * 4));
+ __ vstm(ia, r4, s0, s5);
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
+ }
+}
+
+
+TEST(10) {
+ // Test VFP multi load/store with db_w.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double g;
+ double h;
+ } D;
+ D d;
+
+ typedef struct {
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+ float f;
+ float g;
+ float h;
+ } F;
+ F f;
+
+ // Create a function that uses vldm/vstm to move some double and
+ // single precision values around in memory.
+ Assembler assm(isolate, NULL, 0);
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+ __ vldm(db_w, r4, d4, d7);
+ __ vldm(db_w, r4, d0, d3);
+
+ __ addi(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+ __ vstm(db_w, r4, d0, d5);
+ __ vstm(db_w, r4, d6, d7);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+ __ vldm(db_w, r4, s4, s7);
+ __ vldm(db_w, r4, s0, s3);
+
+ __ addi(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+ __ vstm(db_w, r4, s0, s5);
+ __ vstm(db_w, r4, s6, s7);
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
+ }
+}
+
+
+TEST(11) {
+ // Test instructions using the carry flag.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ int32_t a;
+ int32_t b;
+ int32_t c;
+ int32_t d;
+ } I;
+ I i;
+
+ i.a = 0xabcd0001;
+ i.b = 0xabcd0000;
+
+ Assembler assm(isolate, NULL, 0);
+
+ // Test HeapObject untagging.
+ __ ldr(r1, MemOperand(r0, OFFSET_OF(I, a)));
+ __ mov(r1, Operand(r1, ASR, 1), SetCC);
+ __ adc(r1, r1, Operand(r1), LeaveCC, cs);
+ __ str(r1, MemOperand(r0, OFFSET_OF(I, a)));
+
+ __ ldr(r2, MemOperand(r0, OFFSET_OF(I, b)));
+ __ mov(r2, Operand(r2, ASR, 1), SetCC);
+ __ adc(r2, r2, Operand(r2), LeaveCC, cs);
+ __ str(r2, MemOperand(r0, OFFSET_OF(I, b)));
+
+ // Test corner cases.
+ __ mov(r1, Operand(0xffffffff));
+ __ mov(r2, Operand::Zero());
+ __ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry.
+ __ adc(r3, r1, Operand(r2));
+ __ str(r3, MemOperand(r0, OFFSET_OF(I, c)));
+
+ __ mov(r1, Operand(0xffffffff));
+ __ mov(r2, Operand::Zero());
+ __ mov(r3, Operand(r2, ASR, 1), SetCC); // Unset the carry.
+ __ adc(r3, r1, Operand(r2));
+ __ str(r3, MemOperand(r0, OFFSET_OF(I, d)));
+
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(0xabcd0001, i.a);
+ CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
+ CHECK_EQ(0x00000000, i.c);
+ CHECK_EQ(0xffffffff, i.d);
+}
+
+
+TEST(12) {
+ // Test chaining of label usages within instructions (issue 1644).
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ Label target;
+ __ b(eq, &target);
+ __ b(ne, &target);
+ __ bind(&target);
+ __ nop();
+}
+#endif
+
+#undef __
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 23d0be64ae..f5d59ded9b 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdlib.h>
+#include <cstdlib>
+#include <iostream>
#include "src/v8.h"
@@ -51,9 +52,9 @@ using namespace v8::internal;
typedef int (*F0)();
typedef int (*F1)(int64_t x);
typedef int (*F2)(int64_t x, int64_t y);
-typedef int (*F3)(double x);
-typedef int64_t (*F4)(int64_t* x, int64_t* y);
-typedef int64_t (*F5)(int64_t x);
+typedef unsigned (*F3)(double x);
+typedef uint64_t (*F4)(uint64_t* x, uint64_t* y);
+typedef uint64_t (*F5)(uint64_t x);
#ifdef _WIN64
static const Register arg1 = rcx;
@@ -220,9 +221,9 @@ TEST(AssemblerX64XchglOperations) {
CodeDesc desc;
assm.GetCode(&desc);
// Call the function from C++.
- int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
- int64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
- int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
+ uint64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 40000000), left);
CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 20000000), right);
USE(result);
@@ -245,9 +246,9 @@ TEST(AssemblerX64OrlOperations) {
CodeDesc desc;
assm.GetCode(&desc);
// Call the function from C++.
- int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
- int64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
- int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
+ uint64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 60000000), left);
USE(result);
}
@@ -269,8 +270,8 @@ TEST(AssemblerX64RollOperations) {
CodeDesc desc;
assm.GetCode(&desc);
// Call the function from C++.
- int64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
- int64_t result = FUNCTION_CAST<F5>(buffer)(src);
+ uint64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
+ uint64_t result = FUNCTION_CAST<F5>(buffer)(src);
CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 80000001), result);
}
@@ -291,9 +292,9 @@ TEST(AssemblerX64SublOperations) {
CodeDesc desc;
assm.GetCode(&desc);
// Call the function from C++.
- int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
- int64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
- int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ uint64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
+ uint64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x10000000, e0000000), left);
USE(result);
}
@@ -321,10 +322,10 @@ TEST(AssemblerX64TestlOperations) {
CodeDesc desc;
assm.GetCode(&desc);
// Call the function from C++.
- int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
- int64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
- int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
- CHECK_EQ(static_cast<int64_t>(1), result);
+ uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ uint64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
+ uint64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ CHECK_EQ(1u, result);
}
@@ -344,9 +345,9 @@ TEST(AssemblerX64XorlOperations) {
CodeDesc desc;
assm.GetCode(&desc);
// Call the function from C++.
- int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
- int64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
- int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ uint64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ uint64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
+ uint64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 40000000), left);
USE(result);
}
@@ -586,7 +587,7 @@ TEST(AssemblerMultiByteNop) {
#ifdef __GNUC__
-#define ELEMENT_COUNT 4
+#define ELEMENT_COUNT 4u
void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(CcTest::isolate());
@@ -603,7 +604,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
__ popq(rcx);
// Store input vector on the stack.
- for (int i = 0; i < ELEMENT_COUNT; i++) {
+ for (unsigned i = 0; i < ELEMENT_COUNT; i++) {
__ movl(rax, Immediate(vec->Get(i)->Int32Value()));
__ shlq(rax, Immediate(0x20));
__ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
@@ -657,7 +658,7 @@ TEST(StackAlignmentForSSE2) {
int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
v8::Local<v8::Array> v8_vec = v8::Array::New(isolate, ELEMENT_COUNT);
- for (int i = 0; i < ELEMENT_COUNT; i++) {
+ for (unsigned i = 0; i < ELEMENT_COUNT; i++) {
v8_vec->Set(i, v8_num(vec[i]));
}
@@ -696,9 +697,9 @@ TEST(AssemblerX64Extractps) {
F3 f = FUNCTION_CAST<F3>(code->entry());
uint64_t value1 = V8_2PART_UINT64_C(0x12345678, 87654321);
- CHECK_EQ(0x12345678, f(uint64_to_double(value1)));
+ CHECK_EQ(0x12345678u, f(uint64_to_double(value1)));
uint64_t value2 = V8_2PART_UINT64_C(0x87654321, 12345678);
- CHECK_EQ(0x87654321, f(uint64_to_double(value2)));
+ CHECK_EQ(0x87654321u, f(uint64_to_double(value2)));
}
@@ -1186,4 +1187,100 @@ TEST(AssemblerX64FMA_ss) {
F8 f = FUNCTION_CAST<F8>(code->entry());
CHECK_EQ(0, f(9.26621069e-05f, -2.4607749f, -1.09587872f));
}
+
+
+TEST(AssemblerX64JumpTables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ Label done, table;
+ __ leaq(arg2, Operand(&table));
+ __ jmp(Operand(arg2, arg1, times_8, 0));
+ __ ud2();
+ __ bind(&table);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dq(&labels[i]);
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ movq(rax, Immediate(values[i]));
+ __ jmp(&done);
+ }
+
+ __ bind(&done);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = f(i);
+ PrintF("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(AssemblerX64JumpTables2) {
+ // Test jump tables with backwards jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ Label done, table;
+ __ leaq(arg2, Operand(&table));
+ __ jmp(Operand(arg2, arg1, times_8, 0));
+ __ ud2();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ movq(rax, Immediate(values[i]));
+ __ jmp(&done);
+ }
+
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&table);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dq(&labels[i]);
+ }
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = f(i);
+ PrintF("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-x87.cc b/deps/v8/test/cctest/test-assembler-x87.cc
index 8341f9b49e..c07be845b9 100644
--- a/deps/v8/test/cctest/test-assembler-x87.cc
+++ b/deps/v8/test/cctest/test-assembler-x87.cc
@@ -229,7 +229,7 @@ TEST(AssemblerIa329) {
CHECK_EQ(kLess, f(1.1, 2.2));
CHECK_EQ(kEqual, f(2.2, 2.2));
CHECK_EQ(kGreater, f(3.3, 2.2));
- CHECK_EQ(kNaN, f(v8::base::OS::nan_value(), 1.1));
+ CHECK_EQ(kNaN, f(std::numeric_limits<double>::quiet_NaN(), 1.1));
}
@@ -312,4 +312,98 @@ TEST(AssemblerMultiByteNop) {
}
+TEST(AssemblerIa32JumpTables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ Label done, table;
+ __ mov(eax, Operand(esp, 4));
+ __ jmp(Operand::JumpTable(eax, times_4, &table));
+ __ ud2();
+ __ bind(&table);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ mov(eax, Immediate(values[i]));
+ __ jmp(&done);
+ }
+
+ __ bind(&done);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = f(i);
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(AssemblerIa32JumpTables2) {
+ // Test jump tables with backward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ Label done, table;
+ __ mov(eax, Operand(esp, 4));
+ __ jmp(Operand::JumpTable(eax, times_4, &table));
+ __ ud2();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ mov(eax, Immediate(values[i]));
+ __ jmp(&done);
+ }
+
+ __ bind(&table);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+
+ __ bind(&done);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int res = f(i);
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 096d5c788f..7d8b4059f5 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -38,8 +38,7 @@ TEST(List) {
List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
- Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate);
+ Zone zone;
AstValueFactory value_factory(&zone, 0);
AstNodeFactory factory(&value_factory);
AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
diff --git a/deps/v8/test/cctest/test-bignum-dtoa.cc b/deps/v8/test/cctest/test-bignum-dtoa.cc
index 9262e018c8..800a3ab9c9 100644
--- a/deps/v8/test/cctest/test-bignum-dtoa.cc
+++ b/deps/v8/test/cctest/test-bignum-dtoa.cc
@@ -63,106 +63,106 @@ TEST(BignumDtoaVariousDoubles) {
int point;
BignumDtoa(1.0, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
BignumDtoa(1.0, BIGNUM_DTOA_FIXED, 3, buffer, &length, &point);
CHECK_GE(3, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
BignumDtoa(1.0, BIGNUM_DTOA_PRECISION, 3, buffer, &length, &point);
CHECK_GE(3, length);
TrimRepresentation(buffer);
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
BignumDtoa(1.5, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
BignumDtoa(1.5, BIGNUM_DTOA_FIXED, 10, buffer, &length, &point);
CHECK_GE(10, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
BignumDtoa(1.5, BIGNUM_DTOA_PRECISION, 10, buffer, &length, &point);
CHECK_GE(10, length);
TrimRepresentation(buffer);
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
double min_double = 5e-324;
BignumDtoa(min_double, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("5", buffer.start());
+ CHECK_EQ(0, strcmp("5", buffer.start()));
CHECK_EQ(-323, point);
BignumDtoa(min_double, BIGNUM_DTOA_FIXED, 5, buffer, &length, &point);
CHECK_GE(5, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
BignumDtoa(min_double, BIGNUM_DTOA_PRECISION, 5, buffer, &length, &point);
CHECK_GE(5, length);
TrimRepresentation(buffer);
- CHECK_EQ("49407", buffer.start());
+ CHECK_EQ(0, strcmp("49407", buffer.start()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
BignumDtoa(max_double, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("17976931348623157", buffer.start());
+ CHECK_EQ(0, strcmp("17976931348623157", buffer.start()));
CHECK_EQ(309, point);
BignumDtoa(max_double, BIGNUM_DTOA_PRECISION, 7, buffer, &length, &point);
CHECK_GE(7, length);
TrimRepresentation(buffer);
- CHECK_EQ("1797693", buffer.start());
+ CHECK_EQ(0, strcmp("1797693", buffer.start()));
CHECK_EQ(309, point);
BignumDtoa(4294967272.0, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("4294967272", buffer.start());
+ CHECK_EQ(0, strcmp("4294967272", buffer.start()));
CHECK_EQ(10, point);
BignumDtoa(4294967272.0, BIGNUM_DTOA_FIXED, 5, buffer, &length, &point);
- CHECK_EQ("429496727200000", buffer.start());
+ CHECK_EQ(0, strcmp("429496727200000", buffer.start()));
CHECK_EQ(10, point);
BignumDtoa(4294967272.0, BIGNUM_DTOA_PRECISION, 14, buffer, &length, &point);
CHECK_GE(14, length);
TrimRepresentation(buffer);
- CHECK_EQ("4294967272", buffer.start());
+ CHECK_EQ(0, strcmp("4294967272", buffer.start()));
CHECK_EQ(10, point);
BignumDtoa(4.1855804968213567e298, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ("4185580496821357", buffer.start());
+ CHECK_EQ(0, strcmp("4185580496821357", buffer.start()));
CHECK_EQ(299, point);
BignumDtoa(4.1855804968213567e298, BIGNUM_DTOA_PRECISION, 20,
buffer, &length, &point);
CHECK_GE(20, length);
TrimRepresentation(buffer);
- CHECK_EQ("41855804968213567225", buffer.start());
+ CHECK_EQ(0, strcmp("41855804968213567225", buffer.start()));
CHECK_EQ(299, point);
BignumDtoa(5.5626846462680035e-309, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ("5562684646268003", buffer.start());
+ CHECK_EQ(0, strcmp("5562684646268003", buffer.start()));
CHECK_EQ(-308, point);
BignumDtoa(5.5626846462680035e-309, BIGNUM_DTOA_PRECISION, 1,
buffer, &length, &point);
CHECK_GE(1, length);
TrimRepresentation(buffer);
- CHECK_EQ("6", buffer.start());
+ CHECK_EQ(0, strcmp("6", buffer.start()));
CHECK_EQ(-308, point);
BignumDtoa(2147483648.0, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ("2147483648", buffer.start());
+ CHECK_EQ(0, strcmp("2147483648", buffer.start()));
CHECK_EQ(10, point);
@@ -170,86 +170,86 @@ TEST(BignumDtoaVariousDoubles) {
buffer, &length, &point);
CHECK_GE(2, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("2147483648", buffer.start());
+ CHECK_EQ(0, strcmp("2147483648", buffer.start()));
CHECK_EQ(10, point);
BignumDtoa(2147483648.0, BIGNUM_DTOA_PRECISION, 5,
buffer, &length, &point);
CHECK_GE(5, length);
TrimRepresentation(buffer);
- CHECK_EQ("21475", buffer.start());
+ CHECK_EQ(0, strcmp("21475", buffer.start()));
CHECK_EQ(10, point);
BignumDtoa(3.5844466002796428e+298, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ("35844466002796428", buffer.start());
+ CHECK_EQ(0, strcmp("35844466002796428", buffer.start()));
CHECK_EQ(299, point);
BignumDtoa(3.5844466002796428e+298, BIGNUM_DTOA_PRECISION, 10,
buffer, &length, &point);
CHECK_GE(10, length);
TrimRepresentation(buffer);
- CHECK_EQ("35844466", buffer.start());
+ CHECK_EQ(0, strcmp("35844466", buffer.start()));
CHECK_EQ(299, point);
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("22250738585072014", buffer.start());
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
CHECK_EQ(-307, point);
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 20, buffer, &length, &point);
CHECK_GE(20, length);
TrimRepresentation(buffer);
- CHECK_EQ("22250738585072013831", buffer.start());
+ CHECK_EQ(0, strcmp("22250738585072013831", buffer.start()));
CHECK_EQ(-307, point);
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("2225073858507201", buffer.start());
+ CHECK_EQ(0, strcmp("2225073858507201", buffer.start()));
CHECK_EQ(-307, point);
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 20, buffer, &length, &point);
CHECK_GE(20, length);
TrimRepresentation(buffer);
- CHECK_EQ("2225073858507200889", buffer.start());
+ CHECK_EQ(0, strcmp("2225073858507200889", buffer.start()));
CHECK_EQ(-307, point);
BignumDtoa(4128420500802942e-24, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ("4128420500802942", buffer.start());
+ CHECK_EQ(0, strcmp("4128420500802942", buffer.start()));
CHECK_EQ(-8, point);
v = 3.9292015898194142585311918e-10;
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ("39292015898194143", buffer.start());
+ CHECK_EQ(0, strcmp("39292015898194143", buffer.start()));
v = 4194304.0;
BignumDtoa(v, BIGNUM_DTOA_FIXED, 5, buffer, &length, &point);
CHECK_GE(5, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("4194304", buffer.start());
+ CHECK_EQ(0, strcmp("4194304", buffer.start()));
v = 3.3161339052167390562200598e-237;
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 19, buffer, &length, &point);
CHECK_GE(19, length);
TrimRepresentation(buffer);
- CHECK_EQ("3316133905216739056", buffer.start());
+ CHECK_EQ(0, strcmp("3316133905216739056", buffer.start()));
CHECK_EQ(-236, point);
v = 7.9885183916008099497815232e+191;
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 4, buffer, &length, &point);
CHECK_GE(4, length);
TrimRepresentation(buffer);
- CHECK_EQ("7989", buffer.start());
+ CHECK_EQ(0, strcmp("7989", buffer.start()));
CHECK_EQ(192, point);
v = 1.0000000000000012800000000e+17;
BignumDtoa(v, BIGNUM_DTOA_FIXED, 1, buffer, &length, &point);
CHECK_GE(1, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("100000000000000128", buffer.start());
+ CHECK_EQ(0, strcmp("100000000000000128", buffer.start()));
CHECK_EQ(18, point);
}
@@ -267,7 +267,7 @@ TEST(BignumDtoaGayShortest) {
double v = current_test.v;
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
}
@@ -288,7 +288,7 @@ TEST(BignumDtoaGayFixed) {
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length - point);
TrimRepresentation(buffer);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
}
@@ -310,6 +310,6 @@ TEST(BignumDtoaGayPrecision) {
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length);
TrimRepresentation(buffer);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
}
diff --git a/deps/v8/test/cctest/test-bignum.cc b/deps/v8/test/cctest/test-bignum.cc
index 47ce2a48a9..2761c70a2e 100644
--- a/deps/v8/test/cctest/test-bignum.cc
+++ b/deps/v8/test/cctest/test-bignum.cc
@@ -54,66 +54,66 @@ TEST(Assign) {
Bignum bignum2;
bignum.AssignUInt16(0);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
bignum.AssignUInt16(0xA);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A", buffer);
+ CHECK_EQ(0, strcmp("A", buffer));
bignum.AssignUInt16(0x20);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("20", buffer);
+ CHECK_EQ(0, strcmp("20", buffer));
bignum.AssignUInt64(0);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
bignum.AssignUInt64(0xA);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A", buffer);
+ CHECK_EQ(0, strcmp("A", buffer));
bignum.AssignUInt64(0x20);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("20", buffer);
+ CHECK_EQ(0, strcmp("20", buffer));
bignum.AssignUInt64(0x100);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100", buffer);
+ CHECK_EQ(0, strcmp("100", buffer));
// The first real test, since this will not fit into one bigit.
bignum.AssignUInt64(0x12345678);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("12345678", buffer);
+ CHECK_EQ(0, strcmp("12345678", buffer));
uint64_t big = V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF);
bignum.AssignUInt64(big);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFFFF", buffer));
big = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
bignum.AssignUInt64(big);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("123456789ABCDEF0", buffer);
+ CHECK_EQ(0, strcmp("123456789ABCDEF0", buffer));
bignum2.AssignBignum(bignum);
CHECK(bignum2.ToHexString(buffer, kBufferSize));
- CHECK_EQ("123456789ABCDEF0", buffer);
+ CHECK_EQ(0, strcmp("123456789ABCDEF0", buffer));
AssignDecimalString(&bignum, "0");
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
AssignDecimalString(&bignum, "1");
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
AssignDecimalString(&bignum, "1234567890");
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("499602D2", buffer);
+ CHECK_EQ(0, strcmp("499602D2", buffer));
AssignHexString(&bignum, "0");
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
AssignHexString(&bignum, "123456789ABCDEF0");
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("123456789ABCDEF0", buffer);
+ CHECK_EQ(0, strcmp("123456789ABCDEF0", buffer));
}
@@ -123,35 +123,35 @@ TEST(ShiftLeft) {
AssignHexString(&bignum, "0");
bignum.ShiftLeft(100);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
AssignHexString(&bignum, "1");
bignum.ShiftLeft(1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2", buffer);
+ CHECK_EQ(0, strcmp("2", buffer));
AssignHexString(&bignum, "1");
bignum.ShiftLeft(4);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10", buffer);
+ CHECK_EQ(0, strcmp("10", buffer));
AssignHexString(&bignum, "1");
bignum.ShiftLeft(32);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100000000", buffer);
+ CHECK_EQ(0, strcmp("100000000", buffer));
AssignHexString(&bignum, "1");
bignum.ShiftLeft(64);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000", buffer));
AssignHexString(&bignum, "123456789ABCDEF");
bignum.ShiftLeft(64);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("123456789ABCDEF0000000000000000", buffer);
+ CHECK_EQ(0, strcmp("123456789ABCDEF0000000000000000", buffer));
bignum.ShiftLeft(1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2468ACF13579BDE0000000000000000", buffer);
+ CHECK_EQ(0, strcmp("2468ACF13579BDE0000000000000000", buffer));
}
@@ -161,96 +161,96 @@ TEST(AddUInt64) {
AssignHexString(&bignum, "0");
bignum.AddUInt64(0xA);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A", buffer);
+ CHECK_EQ(0, strcmp("A", buffer));
AssignHexString(&bignum, "1");
bignum.AddUInt64(0xA);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("B", buffer);
+ CHECK_EQ(0, strcmp("B", buffer));
AssignHexString(&bignum, "1");
bignum.AddUInt64(0x100);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("101", buffer);
+ CHECK_EQ(0, strcmp("101", buffer));
AssignHexString(&bignum, "1");
bignum.AddUInt64(0xFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000", buffer);
+ CHECK_EQ(0, strcmp("10000", buffer));
AssignHexString(&bignum, "FFFFFFF");
bignum.AddUInt64(0x1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000", buffer);
+ CHECK_EQ(0, strcmp("10000000", buffer));
AssignHexString(&bignum, "10000000000000000000000000000000000000000000");
bignum.AddUInt64(0xFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1000000000000000000000000000000000000000FFFF", buffer);
+ CHECK_EQ(0, strcmp("1000000000000000000000000000000000000000FFFF", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
bignum.AddUInt64(0x1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100000000000000000000000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("100000000000000000000000000000000000000000000", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
bignum.AddUInt64(1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000001", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000001", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
bignum.AddUInt64(0xFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1000000000000000000000FFFF", buffer);
+ CHECK_EQ(0, strcmp("1000000000000000000000FFFF", buffer));
AssignHexString(&bignum, "0");
bignum.AddUInt64(V8_2PART_UINT64_C(0xA, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A00000000", buffer);
+ CHECK_EQ(0, strcmp("A00000000", buffer));
AssignHexString(&bignum, "1");
bignum.AddUInt64(V8_2PART_UINT64_C(0xA, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A00000001", buffer);
+ CHECK_EQ(0, strcmp("A00000001", buffer));
AssignHexString(&bignum, "1");
bignum.AddUInt64(V8_2PART_UINT64_C(0x100, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000001", buffer);
+ CHECK_EQ(0, strcmp("10000000001", buffer));
AssignHexString(&bignum, "1");
bignum.AddUInt64(V8_2PART_UINT64_C(0xFFFF, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFF00000001", buffer);
+ CHECK_EQ(0, strcmp("FFFF00000001", buffer));
AssignHexString(&bignum, "FFFFFFF");
bignum.AddUInt64(V8_2PART_UINT64_C(0x1, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10FFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("10FFFFFFF", buffer));
AssignHexString(&bignum, "10000000000000000000000000000000000000000000");
bignum.AddUInt64(V8_2PART_UINT64_C(0xFFFF, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000000000000FFFF00000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000000000000FFFF00000000", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
bignum.AddUInt64(V8_2PART_UINT64_C(0x1, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1000000000000000000000000000000000000FFFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("1000000000000000000000000000000000000FFFFFFFF", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
bignum.AddUInt64(V8_2PART_UINT64_C(0x1, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000100000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000100000000", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
bignum.AddUInt64(V8_2PART_UINT64_C(0xFFFF, 00000000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000FFFF00000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000FFFF00000000", buffer));
}
@@ -263,55 +263,55 @@ TEST(AddBignum) {
AssignHexString(&bignum, "0");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
AssignHexString(&bignum, "1");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2", buffer);
+ CHECK_EQ(0, strcmp("2", buffer));
AssignHexString(&bignum, "FFFFFFF");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000", buffer);
+ CHECK_EQ(0, strcmp("10000000", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFF");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100000000000000", buffer);
+ CHECK_EQ(0, strcmp("100000000000000", buffer));
AssignHexString(&bignum, "10000000000000000000000000000000000000000000");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000000000000000000000001", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000000000000000000000001", buffer));
AssignHexString(&other, "1000000000000");
AssignHexString(&bignum, "1");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1000000000001", buffer);
+ CHECK_EQ(0, strcmp("1000000000001", buffer));
AssignHexString(&bignum, "FFFFFFF");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100000FFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("100000FFFFFFF", buffer));
AssignHexString(&bignum, "10000000000000000000000000000000000000000000");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000000000001000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000000000001000000000000", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1000000000000000000000000000000FFFFFFFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("1000000000000000000000000000000FFFFFFFFFFFF", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000001000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000001000000000000", buffer));
other.ShiftLeft(64);
// other == "10000000000000000000000000000"
@@ -319,28 +319,28 @@ TEST(AddBignum) {
bignum.AssignUInt16(0x1);
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000000001", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000000001", buffer));
AssignHexString(&bignum, "FFFFFFF");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1000000000000000000000FFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("1000000000000000000000FFFFFFF", buffer));
AssignHexString(&bignum, "10000000000000000000000000000000000000000000");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000010000000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000010000000000000000000000000000", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100000000000000FFFFFFFFFFFFFFFFFFFFFFFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("100000000000000FFFFFFFFFFFFFFFFFFFFFFFFFFFF", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
bignum.AddBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10010000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("10010000000000000000000000000", buffer));
}
@@ -353,55 +353,55 @@ TEST(SubtractBignum) {
AssignHexString(&other, "0");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
AssignHexString(&bignum, "2");
AssignHexString(&other, "0");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2", buffer);
+ CHECK_EQ(0, strcmp("2", buffer));
AssignHexString(&bignum, "10000000");
AssignHexString(&other, "1");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFF", buffer));
AssignHexString(&bignum, "100000000000000");
AssignHexString(&other, "1");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFF", buffer));
AssignHexString(&bignum, "10000000000000000000000000000000000000000001");
AssignHexString(&other, "1");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000000000000000000000000", buffer));
AssignHexString(&bignum, "1000000000001");
AssignHexString(&other, "1000000000000");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
AssignHexString(&bignum, "100000FFFFFFF");
AssignHexString(&other, "1000000000000");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFF", buffer));
AssignHexString(&bignum, "10000000000000000000000000000001000000000000");
AssignHexString(&other, "1000000000000");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000000000000000000000000", buffer));
AssignHexString(&bignum, "1000000000000000000000000000000FFFFFFFFFFFF");
AssignHexString(&other, "1000000000000");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
@@ -409,7 +409,7 @@ TEST(SubtractBignum) {
AssignHexString(&other, "1000000000000");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFF000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFF000000000000", buffer));
AssignHexString(&other, "1000000000000");
other.ShiftLeft(48);
@@ -420,7 +420,7 @@ TEST(SubtractBignum) {
// bignum == "10000000000000000000000000"
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("F000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("F000000000000000000000000", buffer));
other.AssignUInt16(0x1);
other.ShiftLeft(35);
@@ -430,17 +430,17 @@ TEST(SubtractBignum) {
// bignum = FFFFFFF000000000000000
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFEFFFFFF800000000", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFEFFFFFF800000000", buffer));
AssignHexString(&bignum, "10000000000000000000000000000000000000000000");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF800000000", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF800000000", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
bignum.SubtractBignum(other);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFF", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFF", buffer));
}
@@ -451,97 +451,97 @@ TEST(MultiplyUInt32) {
AssignHexString(&bignum, "0");
bignum.MultiplyByUInt32(0x25);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
AssignHexString(&bignum, "2");
bignum.MultiplyByUInt32(0x5);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A", buffer);
+ CHECK_EQ(0, strcmp("A", buffer));
AssignHexString(&bignum, "10000000");
bignum.MultiplyByUInt32(0x9);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("90000000", buffer);
+ CHECK_EQ(0, strcmp("90000000", buffer));
AssignHexString(&bignum, "100000000000000");
bignum.MultiplyByUInt32(0xFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFF00000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFF00000000000000", buffer));
AssignHexString(&bignum, "100000000000000");
bignum.MultiplyByUInt32(0xFFFFFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFF00000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFF00000000000000", buffer));
AssignHexString(&bignum, "1234567ABCD");
bignum.MultiplyByUInt32(0xFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("12333335552433", buffer);
+ CHECK_EQ(0, strcmp("12333335552433", buffer));
AssignHexString(&bignum, "1234567ABCD");
bignum.MultiplyByUInt32(0xFFFFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("12345679998A985433", buffer);
+ CHECK_EQ(0, strcmp("12345679998A985433", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt32(0x2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1FFFFFFFFFFFFFFFE", buffer);
+ CHECK_EQ(0, strcmp("1FFFFFFFFFFFFFFFE", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt32(0x4);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("3FFFFFFFFFFFFFFFC", buffer);
+ CHECK_EQ(0, strcmp("3FFFFFFFFFFFFFFFC", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt32(0xF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("EFFFFFFFFFFFFFFF1", buffer);
+ CHECK_EQ(0, strcmp("EFFFFFFFFFFFFFFF1", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt32(0xFFFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFEFFFFFFFFFF000001", buffer);
+ CHECK_EQ(0, strcmp("FFFFFEFFFFFFFFFF000001", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
// "10 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt32(2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("20000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("20000000000000000000000000", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
// "10 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt32(0xF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("F0000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("F0000000000000000000000000", buffer));
bignum.AssignUInt16(0xFFFF);
bignum.ShiftLeft(100);
// "FFFF0 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt32(0xFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFE00010000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFE00010000000000000000000000000", buffer));
bignum.AssignUInt16(0xFFFF);
bignum.ShiftLeft(100);
// "FFFF0 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt32(0xFFFFFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFEFFFF00010000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFEFFFF00010000000000000000000000000", buffer));
bignum.AssignUInt16(0xFFFF);
bignum.ShiftLeft(100);
// "FFFF0 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt32(0xFFFFFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFEFFFF00010000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFEFFFF00010000000000000000000000000", buffer));
AssignDecimalString(&bignum, "15611230384529777");
bignum.MultiplyByUInt32(10000000);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("210EDD6D4CDD2580EE80", buffer);
+ CHECK_EQ(0, strcmp("210EDD6D4CDD2580EE80", buffer));
}
@@ -552,97 +552,97 @@ TEST(MultiplyUInt64) {
AssignHexString(&bignum, "0");
bignum.MultiplyByUInt64(0x25);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
AssignHexString(&bignum, "2");
bignum.MultiplyByUInt64(0x5);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A", buffer);
+ CHECK_EQ(0, strcmp("A", buffer));
AssignHexString(&bignum, "10000000");
bignum.MultiplyByUInt64(0x9);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("90000000", buffer);
+ CHECK_EQ(0, strcmp("90000000", buffer));
AssignHexString(&bignum, "100000000000000");
bignum.MultiplyByUInt64(0xFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFF00000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFF00000000000000", buffer));
AssignHexString(&bignum, "100000000000000");
bignum.MultiplyByUInt64(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFFFF00000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFFFF00000000000000", buffer));
AssignHexString(&bignum, "1234567ABCD");
bignum.MultiplyByUInt64(0xFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("12333335552433", buffer);
+ CHECK_EQ(0, strcmp("12333335552433", buffer));
AssignHexString(&bignum, "1234567ABCD");
bignum.MultiplyByUInt64(V8_2PART_UINT64_C(0xFF, FFFFFFFF));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1234567ABCBDCBA985433", buffer);
+ CHECK_EQ(0, strcmp("1234567ABCBDCBA985433", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt64(0x2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1FFFFFFFFFFFFFFFE", buffer);
+ CHECK_EQ(0, strcmp("1FFFFFFFFFFFFFFFE", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt64(0x4);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("3FFFFFFFFFFFFFFFC", buffer);
+ CHECK_EQ(0, strcmp("3FFFFFFFFFFFFFFFC", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt64(0xF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("EFFFFFFFFFFFFFFF1", buffer);
+ CHECK_EQ(0, strcmp("EFFFFFFFFFFFFFFF1", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFFFF");
bignum.MultiplyByUInt64(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFFFE0000000000000001", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFFFE0000000000000001", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
// "10 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt64(2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("20000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("20000000000000000000000000", buffer));
bignum.AssignUInt16(0x1);
bignum.ShiftLeft(100);
// "10 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt64(0xF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("F0000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("F0000000000000000000000000", buffer));
bignum.AssignUInt16(0xFFFF);
bignum.ShiftLeft(100);
// "FFFF0 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt64(0xFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFE00010000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFE00010000000000000000000000000", buffer));
bignum.AssignUInt16(0xFFFF);
bignum.ShiftLeft(100);
// "FFFF0 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt64(0xFFFFFFFF);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFEFFFF00010000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFEFFFF00010000000000000000000000000", buffer));
bignum.AssignUInt16(0xFFFF);
bignum.ShiftLeft(100);
// "FFFF0 0000 0000 0000 0000 0000 0000"
bignum.MultiplyByUInt64(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFEFFFFFFFFFFFF00010000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("FFFEFFFFFFFFFFFF00010000000000000000000000000", buffer));
AssignDecimalString(&bignum, "15611230384529777");
bignum.MultiplyByUInt64(V8_2PART_UINT64_C(0x8ac72304, 89e80000));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1E10EE4B11D15A7F3DE7F3C7680000", buffer);
+ CHECK_EQ(0, strcmp("1E10EE4B11D15A7F3DE7F3C7680000", buffer));
}
@@ -653,204 +653,216 @@ TEST(MultiplyPowerOfTen) {
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("3034", buffer);
+ CHECK_EQ(0, strcmp("3034", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1E208", buffer);
+ CHECK_EQ(0, strcmp("1E208", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(3);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("12D450", buffer);
+ CHECK_EQ(0, strcmp("12D450", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(4);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("BC4B20", buffer);
+ CHECK_EQ(0, strcmp("BC4B20", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(5);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("75AEF40", buffer);
+ CHECK_EQ(0, strcmp("75AEF40", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(6);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("498D5880", buffer);
+ CHECK_EQ(0, strcmp("498D5880", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(7);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2DF857500", buffer);
+ CHECK_EQ(0, strcmp("2DF857500", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(8);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1CBB369200", buffer);
+ CHECK_EQ(0, strcmp("1CBB369200", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(9);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("11F5021B400", buffer);
+ CHECK_EQ(0, strcmp("11F5021B400", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(10);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("B3921510800", buffer);
+ CHECK_EQ(0, strcmp("B3921510800", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(11);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("703B4D2A5000", buffer);
+ CHECK_EQ(0, strcmp("703B4D2A5000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(12);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("4625103A72000", buffer);
+ CHECK_EQ(0, strcmp("4625103A72000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(13);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2BD72A24874000", buffer);
+ CHECK_EQ(0, strcmp("2BD72A24874000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(14);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1B667A56D488000", buffer);
+ CHECK_EQ(0, strcmp("1B667A56D488000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(15);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("11200C7644D50000", buffer);
+ CHECK_EQ(0, strcmp("11200C7644D50000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(16);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("AB407C9EB0520000", buffer);
+ CHECK_EQ(0, strcmp("AB407C9EB0520000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(17);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("6B084DE32E3340000", buffer);
+ CHECK_EQ(0, strcmp("6B084DE32E3340000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(18);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("42E530ADFCE0080000", buffer);
+ CHECK_EQ(0, strcmp("42E530ADFCE0080000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(19);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("29CF3E6CBE0C0500000", buffer);
+ CHECK_EQ(0, strcmp("29CF3E6CBE0C0500000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(20);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1A218703F6C783200000", buffer);
+ CHECK_EQ(0, strcmp("1A218703F6C783200000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(21);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1054F4627A3CB1F400000", buffer);
+ CHECK_EQ(0, strcmp("1054F4627A3CB1F400000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(22);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A3518BD8C65EF38800000", buffer);
+ CHECK_EQ(0, strcmp("A3518BD8C65EF38800000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(23);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("6612F7677BFB5835000000", buffer);
+ CHECK_EQ(0, strcmp("6612F7677BFB5835000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(24);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("3FCBDAA0AD7D17212000000", buffer);
+ CHECK_EQ(0, strcmp("3FCBDAA0AD7D17212000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(25);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("27DF68A46C6E2E74B4000000", buffer);
+ CHECK_EQ(0, strcmp("27DF68A46C6E2E74B4000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(26);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("18EBA166C3C4DD08F08000000", buffer);
+ CHECK_EQ(0, strcmp("18EBA166C3C4DD08F08000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(27);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("F9344E03A5B0A259650000000", buffer);
+ CHECK_EQ(0, strcmp("F9344E03A5B0A259650000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(28);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("9BC0B0C2478E6577DF20000000", buffer);
+ CHECK_EQ(0, strcmp("9BC0B0C2478E6577DF20000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(29);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("61586E796CB8FF6AEB740000000", buffer);
+ CHECK_EQ(0, strcmp("61586E796CB8FF6AEB740000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(30);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("3CD7450BE3F39FA2D32880000000", buffer);
+ CHECK_EQ(0, strcmp("3CD7450BE3F39FA2D32880000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(31);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("26068B276E7843C5C3F9500000000", buffer);
+ CHECK_EQ(0, strcmp("26068B276E7843C5C3F9500000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(50);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("149D1B4CFED03B23AB5F4E1196EF45C08000000000000", buffer);
+ CHECK_EQ(0, strcmp("149D1B4CFED03B23AB5F4E1196EF45C08000000000000", buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(100);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("5827249F27165024FBC47DFCA9359BF316332D1B91ACEECF471FBAB06D9B2"
- "0000000000000000000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "5827249F27165024FBC47DFCA9359BF316332D1B91ACEECF471FBAB06D9B2"
+ "0000000000000000000000000",
+ buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(200);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("64C1F5C06C3816AFBF8DAFD5A3D756365BB0FD020E6F084E759C1F7C99E4F"
- "55B9ACC667CEC477EB958C2AEEB3C6C19BA35A1AD30B35C51EB72040920000"
- "0000000000000000000000000000000000000000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "64C1F5C06C3816AFBF8DAFD5A3D756365BB0FD020E6F084E759C1F7C99E4F"
+ "55B9ACC667CEC477EB958C2AEEB3C6C19BA35A1AD30B35C51EB72040920000"
+ "0000000000000000000000000000000000000000000000",
+ buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(500);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("96741A625EB5D7C91039FEB5C5ACD6D9831EDA5B083D800E6019442C8C8223"
- "3EAFB3501FE2058062221E15121334928880827DEE1EC337A8B26489F3A40A"
- "CB440A2423734472D10BFCE886F41B3AF9F9503013D86D088929CA86EEB4D8"
- "B9C831D0BD53327B994A0326227CFD0ECBF2EB48B02387AAE2D4CCCDF1F1A1"
- "B8CC4F1FA2C56AD40D0E4DAA9C28CDBF0A549098EA13200000000000000000"
- "00000000000000000000000000000000000000000000000000000000000000"
- "0000000000000000000000000000000000000000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "96741A625EB5D7C91039FEB5C5ACD6D9831EDA5B083D800E6019442C8C8223"
+ "3EAFB3501FE2058062221E15121334928880827DEE1EC337A8B26489F3A40A"
+ "CB440A2423734472D10BFCE886F41B3AF9F9503013D86D088929CA86EEB4D8"
+ "B9C831D0BD53327B994A0326227CFD0ECBF2EB48B02387AAE2D4CCCDF1F1A1"
+ "B8CC4F1FA2C56AD40D0E4DAA9C28CDBF0A549098EA13200000000000000000"
+ "00000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000",
+ buffer));
AssignDecimalString(&bignum, "1234");
bignum.MultiplyByPowerOfTen(1000);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1258040F99B1CD1CC9819C676D413EA50E4A6A8F114BB0C65418C62D399B81"
- "6361466CA8E095193E1EE97173553597C96673AF67FAFE27A66E7EF2E5EF2E"
- "E3F5F5070CC17FE83BA53D40A66A666A02F9E00B0E11328D2224B8694C7372"
- "F3D536A0AD1985911BD361496F268E8B23112500EAF9B88A9BC67B2AB04D38"
- "7FEFACD00F5AF4F764F9ABC3ABCDE54612DE38CD90CB6647CA389EA0E86B16"
- "BF7A1F34086E05ADBE00BD1673BE00FAC4B34AF1091E8AD50BA675E0381440"
- "EA8E9D93E75D816BAB37C9844B1441C38FC65CF30ABB71B36433AF26DD97BD"
- "ABBA96C03B4919B8F3515B92826B85462833380DC193D79F69D20DD6038C99"
- "6114EF6C446F0BA28CC772ACBA58B81C04F8FFDE7B18C4E5A3ABC51E637FDF"
- "6E37FDFF04C940919390F4FF92000000000000000000000000000000000000"
- "00000000000000000000000000000000000000000000000000000000000000"
- "00000000000000000000000000000000000000000000000000000000000000"
- "00000000000000000000000000000000000000000000000000000000000000"
- "0000000000000000000000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "1258040F99B1CD1CC9819C676D413EA50E4A6A8F114BB0C65418C62D399B81"
+ "6361466CA8E095193E1EE97173553597C96673AF67FAFE27A66E7EF2E5EF2E"
+ "E3F5F5070CC17FE83BA53D40A66A666A02F9E00B0E11328D2224B8694C7372"
+ "F3D536A0AD1985911BD361496F268E8B23112500EAF9B88A9BC67B2AB04D38"
+ "7FEFACD00F5AF4F764F9ABC3ABCDE54612DE38CD90CB6647CA389EA0E86B16"
+ "BF7A1F34086E05ADBE00BD1673BE00FAC4B34AF1091E8AD50BA675E0381440"
+ "EA8E9D93E75D816BAB37C9844B1441C38FC65CF30ABB71B36433AF26DD97BD"
+ "ABBA96C03B4919B8F3515B92826B85462833380DC193D79F69D20DD6038C99"
+ "6114EF6C446F0BA28CC772ACBA58B81C04F8FFDE7B18C4E5A3ABC51E637FDF"
+ "6E37FDFF04C940919390F4FF92000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000",
+ buffer));
Bignum bignum2;
AssignHexString(&bignum2, "3DA774C07FB5DF54284D09C675A492165B830D5DAAEB2A7501"
@@ -863,116 +875,142 @@ TEST(MultiplyPowerOfTen) {
"567C794D0BFE338DFBB42D92D4215AF3BB22BF0A8B283FDDC2"
"C667A10958EA6D2");
CHECK(bignum2.ToHexString(buffer, kBufferSize));
- CHECK_EQ("3DA774C07FB5DF54284D09C675A492165B830D5DAAEB2A7501"
- "DA17CF9DFA1CA2282269F92A25A97314296B717E3DCBB9FE17"
- "41A842FE2913F540F40796F2381155763502C58B15AF7A7F88"
- "6F744C9164FF409A28F7FA0C41F89ED79C1BE9F322C8578B97"
- "841F1CBAA17D901BE1230E3C00E1C643AF32638B5674E01FEA"
- "96FC90864E621B856A9E1CE56E6EB545B9C2F8F0CC10DDA88D"
- "CC6D282605F8DB67044F2DFD3695E7BA63877AE16701536AE6"
- "567C794D0BFE338DFBB42D92D4215AF3BB22BF0A8B283FDDC2"
- "C667A10958EA6D2", buffer);
+ CHECK_EQ(0, strcmp(
+ "3DA774C07FB5DF54284D09C675A492165B830D5DAAEB2A7501"
+ "DA17CF9DFA1CA2282269F92A25A97314296B717E3DCBB9FE17"
+ "41A842FE2913F540F40796F2381155763502C58B15AF7A7F88"
+ "6F744C9164FF409A28F7FA0C41F89ED79C1BE9F322C8578B97"
+ "841F1CBAA17D901BE1230E3C00E1C643AF32638B5674E01FEA"
+ "96FC90864E621B856A9E1CE56E6EB545B9C2F8F0CC10DDA88D"
+ "CC6D282605F8DB67044F2DFD3695E7BA63877AE16701536AE6"
+ "567C794D0BFE338DFBB42D92D4215AF3BB22BF0A8B283FDDC2"
+ "C667A10958EA6D2",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2688A8F84FD1AB949930261C0986DB4DF931E85A8AD2FA8921284EE1C2BC51"
- "E55915823BBA5789E7EC99E326EEE69F543ECE890929DED9AC79489884BE57"
- "630AD569E121BB76ED8DAC8FB545A8AFDADF1F8860599AFC47A93B6346C191"
- "7237F5BD36B73EB29371F4A4EE7A116CB5E8E5808D1BEA4D7F7E3716090C13"
- "F29E5DDA53F0FD513362A2D20F6505314B9419DB967F8A8A89589FC43917C3"
- "BB892062B17CBE421DB0D47E34ACCCE060D422CFF60DCBD0277EE038BD509C"
- "7BC494D8D854F5B76696F927EA99BC00C4A5D7928434", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "2688A8F84FD1AB949930261C0986DB4DF931E85A8AD2FA8921284EE1C2BC51"
+ "E55915823BBA5789E7EC99E326EEE69F543ECE890929DED9AC79489884BE57"
+ "630AD569E121BB76ED8DAC8FB545A8AFDADF1F8860599AFC47A93B6346C191"
+ "7237F5BD36B73EB29371F4A4EE7A116CB5E8E5808D1BEA4D7F7E3716090C13"
+ "F29E5DDA53F0FD513362A2D20F6505314B9419DB967F8A8A89589FC43917C3"
+ "BB892062B17CBE421DB0D47E34ACCCE060D422CFF60DCBD0277EE038BD509C"
+ "7BC494D8D854F5B76696F927EA99BC00C4A5D7928434",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1815699B31E30B3CDFBE17D185F44910BBBF313896C3DC95B4B9314D19B5B32"
- "F57AD71655476B630F3E02DF855502394A74115A5BA2B480BCBCD5F52F6F69D"
- "E6C5622CB5152A54788BD9D14B896DE8CB73B53C3800DDACC9C51E0C38FAE76"
- "2F9964232872F9C2738E7150C4AE3F1B18F70583172706FAEE26DC5A78C77A2"
- "FAA874769E52C01DA5C3499F233ECF3C90293E0FB69695D763DAA3AEDA5535B"
- "43DAEEDF6E9528E84CEE0EC000C3C8495C1F9C89F6218AF4C23765261CD5ADD"
- "0787351992A01E5BB8F2A015807AE7A6BB92A08", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "1815699B31E30B3CDFBE17D185F44910BBBF313896C3DC95B4B9314D19B5B32"
+ "F57AD71655476B630F3E02DF855502394A74115A5BA2B480BCBCD5F52F6F69D"
+ "E6C5622CB5152A54788BD9D14B896DE8CB73B53C3800DDACC9C51E0C38FAE76"
+ "2F9964232872F9C2738E7150C4AE3F1B18F70583172706FAEE26DC5A78C77A2"
+ "FAA874769E52C01DA5C3499F233ECF3C90293E0FB69695D763DAA3AEDA5535B"
+ "43DAEEDF6E9528E84CEE0EC000C3C8495C1F9C89F6218AF4C23765261CD5ADD"
+ "0787351992A01E5BB8F2A015807AE7A6BB92A08",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(5);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("5E13A4863ADEE3E5C9FE8D0A73423D695D62D8450CED15A8C9F368952C6DC3"
- "F0EE7D82F3D1EFB7AF38A3B3920D410AFCAD563C8F5F39116E141A3C5C14B3"
- "58CD73077EA35AAD59F6E24AD98F10D5555ABBFBF33AC361EAF429FD5FBE94"
- "17DA9EF2F2956011F9F93646AA38048A681D984ED88127073443247CCC167C"
- "B354A32206EF5A733E73CF82D795A1AD598493211A6D613C39515E0E0F6304"
- "DCD9C810F3518C7F6A7CB6C81E99E02FCC65E8FDB7B7AE97306CC16A8631CE"
- "0A2AEF6568276BE4C176964A73C153FDE018E34CB4C2F40", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "5E13A4863ADEE3E5C9FE8D0A73423D695D62D8450CED15A8C9F368952C6DC3"
+ "F0EE7D82F3D1EFB7AF38A3B3920D410AFCAD563C8F5F39116E141A3C5C14B3"
+ "58CD73077EA35AAD59F6E24AD98F10D5555ABBFBF33AC361EAF429FD5FBE94"
+ "17DA9EF2F2956011F9F93646AA38048A681D984ED88127073443247CCC167C"
+ "B354A32206EF5A733E73CF82D795A1AD598493211A6D613C39515E0E0F6304"
+ "DCD9C810F3518C7F6A7CB6C81E99E02FCC65E8FDB7B7AE97306CC16A8631CE"
+ "0A2AEF6568276BE4C176964A73C153FDE018E34CB4C2F40",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(10);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("8F8CB8EB51945A7E815809F6121EF2F4E61EF3405CD9432CAD2709749EEAFD"
- "1B81E843F14A3667A7BDCCC9E0BB795F63CDFDB62844AC7438976C885A0116"
- "29607DA54F9C023CC366570B7637ED0F855D931752038A614922D0923E382C"
- "B8E5F6C975672DB76E0DE471937BB9EDB11E28874F1C122D5E1EF38CECE9D0"
- "0723056BCBD4F964192B76830634B1D322B7EB0062F3267E84F5C824343A77"
- "4B7DCEE6DD464F01EBDC8C671BB18BB4EF4300A42474A6C77243F2A12B03BF"
- "0443C38A1C0D2701EDB393135AE0DEC94211F9D4EB51F990800", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "8F8CB8EB51945A7E815809F6121EF2F4E61EF3405CD9432CAD2709749EEAFD"
+ "1B81E843F14A3667A7BDCCC9E0BB795F63CDFDB62844AC7438976C885A0116"
+ "29607DA54F9C023CC366570B7637ED0F855D931752038A614922D0923E382C"
+ "B8E5F6C975672DB76E0DE471937BB9EDB11E28874F1C122D5E1EF38CECE9D0"
+ "0723056BCBD4F964192B76830634B1D322B7EB0062F3267E84F5C824343A77"
+ "4B7DCEE6DD464F01EBDC8C671BB18BB4EF4300A42474A6C77243F2A12B03BF"
+ "0443C38A1C0D2701EDB393135AE0DEC94211F9D4EB51F990800",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(50);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("107A8BE345E24407372FC1DE442CBA696BC23C4FFD5B4BDFD9E5C39559815"
- "86628CF8472D2D589F2FC2BAD6E0816EC72CBF85CCA663D8A1EC6C51076D8"
- "2D247E6C26811B7EC4D4300FB1F91028DCB7B2C4E7A60C151161AA7E65E79"
- "B40917B12B2B5FBE7745984D4E8EFA31F9AE6062427B068B144A9CB155873"
- "E7C0C9F0115E5AC72DC5A73C4796DB970BF9205AB8C77A6996EB1B417F9D1"
- "6232431E6313C392203601B9C22CC10DDA88DCC6D282605F8DB67044F2DFD"
- "3695E7BA63877AE16701536AE6567C794D0BFE338DFBB42D924CF964BD2C0"
- "F586E03A2FCD35A408000000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "107A8BE345E24407372FC1DE442CBA696BC23C4FFD5B4BDFD9E5C39559815"
+ "86628CF8472D2D589F2FC2BAD6E0816EC72CBF85CCA663D8A1EC6C51076D8"
+ "2D247E6C26811B7EC4D4300FB1F91028DCB7B2C4E7A60C151161AA7E65E79"
+ "B40917B12B2B5FBE7745984D4E8EFA31F9AE6062427B068B144A9CB155873"
+ "E7C0C9F0115E5AC72DC5A73C4796DB970BF9205AB8C77A6996EB1B417F9D1"
+ "6232431E6313C392203601B9C22CC10DDA88DCC6D282605F8DB67044F2DFD"
+ "3695E7BA63877AE16701536AE6567C794D0BFE338DFBB42D924CF964BD2C0"
+ "F586E03A2FCD35A408000000000000",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(100);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("46784A90ACD0ED3E7759CC585FB32D36EB6034A6F78D92604E3BAA5ED3D8B"
- "6E60E854439BE448897FB4B7EA5A3D873AA0FCB3CFFD80D0530880E45F511"
- "722A50CE7E058B5A6F5464DB7500E34984EE3202A9441F44FA1554C0CEA96"
- "B438A36F25E7C9D56D71AE2CD313EC37534DA299AC0854FC48591A7CF3171"
- "31265AA4AE62DE32344CE7BEEEF894AE686A2DAAFE5D6D9A10971FFD9C064"
- "5079B209E1048F58B5192D41D84336AC4C8C489EEF00939CFC9D55C122036"
- "01B9C22CC10DDA88DCC6D282605F8DB67044F2DFD3695E7BA3F67B96D3A32"
- "E11FB5561B68744C4035B0800DC166D49D98E3FD1D5BB2000000000000000"
- "0000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "46784A90ACD0ED3E7759CC585FB32D36EB6034A6F78D92604E3BAA5ED3D8B"
+ "6E60E854439BE448897FB4B7EA5A3D873AA0FCB3CFFD80D0530880E45F511"
+ "722A50CE7E058B5A6F5464DB7500E34984EE3202A9441F44FA1554C0CEA96"
+ "B438A36F25E7C9D56D71AE2CD313EC37534DA299AC0854FC48591A7CF3171"
+ "31265AA4AE62DE32344CE7BEEEF894AE686A2DAAFE5D6D9A10971FFD9C064"
+ "5079B209E1048F58B5192D41D84336AC4C8C489EEF00939CFC9D55C122036"
+ "01B9C22CC10DDA88DCC6D282605F8DB67044F2DFD3695E7BA3F67B96D3A32"
+ "E11FB5561B68744C4035B0800DC166D49D98E3FD1D5BB2000000000000000"
+ "0000000000",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(200);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("508BD351221DF139D72D88CDC0416845A53EE2D0E6B98352509A9AC312F8C"
- "6CB1A144889416201E0B6CE66EA3EBE259B5FD79ECFC1FD77963CE516CC7E"
- "2FE73D4B5B710C19F6BCB092C7A2FD76286543B8DBD2C596DFF2C896720BA"
- "DFF7BC9C366ACEA3A880AEC287C5E6207DF2739B5326FC19D773BD830B109"
- "ED36C7086544BF8FDB9D4B73719C2B5BC2F571A5937EC46876CD428281F6B"
- "F287E1E07F25C1B1D46BC37324FF657A8B2E0071DB83B86123CA34004F406"
- "001082D7945E90C6E8C9A9FEC2B44BE0DDA46E9F52B152E4D1336D2FCFBC9"
- "96E30CA0082256737365158FE36482AA7EB9DAF2AB128F10E7551A3CD5BE6"
- "0A922F3A7D5EED38B634A7EC95BCF7021BA6820A292000000000000000000"
- "00000000000000000000000000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "508BD351221DF139D72D88CDC0416845A53EE2D0E6B98352509A9AC312F8C"
+ "6CB1A144889416201E0B6CE66EA3EBE259B5FD79ECFC1FD77963CE516CC7E"
+ "2FE73D4B5B710C19F6BCB092C7A2FD76286543B8DBD2C596DFF2C896720BA"
+ "DFF7BC9C366ACEA3A880AEC287C5E6207DF2739B5326FC19D773BD830B109"
+ "ED36C7086544BF8FDB9D4B73719C2B5BC2F571A5937EC46876CD428281F6B"
+ "F287E1E07F25C1B1D46BC37324FF657A8B2E0071DB83B86123CA34004F406"
+ "001082D7945E90C6E8C9A9FEC2B44BE0DDA46E9F52B152E4D1336D2FCFBC9"
+ "96E30CA0082256737365158FE36482AA7EB9DAF2AB128F10E7551A3CD5BE6"
+ "0A922F3A7D5EED38B634A7EC95BCF7021BA6820A292000000000000000000"
+ "00000000000000000000000000000000",
+ buffer));
bignum.AssignBignum(bignum2);
bignum.MultiplyByPowerOfTen(500);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("7845F900E475B5086885BAAAE67C8E85185ACFE4633727F82A4B06B5582AC"
- "BE933C53357DA0C98C20C5AC900C4D76A97247DF52B79F48F9E35840FB715"
- "D392CE303E22622B0CF82D9471B398457DD3196F639CEE8BBD2C146873841"
- "F0699E6C41F04FC7A54B48CEB995BEB6F50FE81DE9D87A8D7F849CC523553"
- "7B7BBBC1C7CAAFF6E9650BE03B308C6D31012AEF9580F70D3EE2083ADE126"
- "8940FA7D6308E239775DFD2F8C97FF7EBD525DAFA6512216F7047A62A93DC"
- "38A0165BDC67E250DCC96A0181DE935A70B38704DC71819F02FC5261FF7E1"
- "E5F11907678B0A3E519FF4C10A867B0C26CE02BE6960BA8621A87303C101C"
- "3F88798BB9F7739655946F8B5744E6B1EAF10B0C5621330F0079209033C69"
- "20DE2E2C8D324F0624463735D482BF291926C22A910F5B80FA25170B6B57D"
- "8D5928C7BCA3FE87461275F69BD5A1B83181DAAF43E05FC3C72C4E93111B6"
- "6205EBF49B28FEDFB7E7526CBDA658A332000000000000000000000000000"
- "0000000000000000000000000000000000000000000000000000000000000"
- "0000000000000000000000000000000000000", buffer);
+ CHECK_EQ(0,
+ strcmp(
+ "7845F900E475B5086885BAAAE67C8E85185ACFE4633727F82A4B06B5582AC"
+ "BE933C53357DA0C98C20C5AC900C4D76A97247DF52B79F48F9E35840FB715"
+ "D392CE303E22622B0CF82D9471B398457DD3196F639CEE8BBD2C146873841"
+ "F0699E6C41F04FC7A54B48CEB995BEB6F50FE81DE9D87A8D7F849CC523553"
+ "7B7BBBC1C7CAAFF6E9650BE03B308C6D31012AEF9580F70D3EE2083ADE126"
+ "8940FA7D6308E239775DFD2F8C97FF7EBD525DAFA6512216F7047A62A93DC"
+ "38A0165BDC67E250DCC96A0181DE935A70B38704DC71819F02FC5261FF7E1"
+ "E5F11907678B0A3E519FF4C10A867B0C26CE02BE6960BA8621A87303C101C"
+ "3F88798BB9F7739655946F8B5744E6B1EAF10B0C5621330F0079209033C69"
+ "20DE2E2C8D324F0624463735D482BF291926C22A910F5B80FA25170B6B57D"
+ "8D5928C7BCA3FE87461275F69BD5A1B83181DAAF43E05FC3C72C4E93111B6"
+ "6205EBF49B28FEDFB7E7526CBDA658A332000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000",
+ buffer));
}
@@ -986,20 +1024,20 @@ TEST(DivideModuloIntBignum) {
other.AssignUInt16(2);
CHECK_EQ(5, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
bignum.AssignUInt16(10);
bignum.ShiftLeft(500);
other.AssignUInt16(2);
other.ShiftLeft(500);
CHECK_EQ(5, bignum.DivideModuloIntBignum(other));
- CHECK_EQ("0", buffer);
+ CHECK_EQ(0, strcmp("0", buffer));
bignum.AssignUInt16(11);
other.AssignUInt16(2);
CHECK_EQ(5, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignUInt16(10);
bignum.ShiftLeft(500);
@@ -1009,7 +1047,7 @@ TEST(DivideModuloIntBignum) {
other.ShiftLeft(500);
CHECK_EQ(5, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignUInt16(10);
bignum.ShiftLeft(500);
@@ -1019,31 +1057,31 @@ TEST(DivideModuloIntBignum) {
bignum.AddBignum(third);
CHECK_EQ(0x1234, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFF", buffer);
+ CHECK_EQ(0, strcmp("FFF", buffer));
bignum.AssignUInt16(10);
AssignHexString(&other, "1234567890");
CHECK_EQ(0, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A", buffer);
+ CHECK_EQ(0, strcmp("A", buffer));
AssignHexString(&bignum, "12345678");
AssignHexString(&other, "3789012");
CHECK_EQ(5, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("D9861E", buffer);
+ CHECK_EQ(0, strcmp("D9861E", buffer));
AssignHexString(&bignum, "70000001");
AssignHexString(&other, "1FFFFFFF");
CHECK_EQ(3, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000004", buffer);
+ CHECK_EQ(0, strcmp("10000004", buffer));
AssignHexString(&bignum, "28000000");
AssignHexString(&other, "12A05F20");
CHECK_EQ(2, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2BF41C0", buffer);
+ CHECK_EQ(0, strcmp("2BF41C0", buffer));
bignum.AssignUInt16(10);
bignum.ShiftLeft(500);
@@ -1053,10 +1091,10 @@ TEST(DivideModuloIntBignum) {
other.SubtractBignum(third);
CHECK_EQ(0x1234, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1232DCC", buffer);
+ CHECK_EQ(0, strcmp("1232DCC", buffer));
CHECK_EQ(0, bignum.DivideModuloIntBignum(other));
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1232DCC", buffer);
+ CHECK_EQ(0, strcmp("1232DCC", buffer));
}
@@ -1363,27 +1401,27 @@ TEST(Square) {
bignum.AssignUInt16(1);
bignum.Square();
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignUInt16(2);
bignum.Square();
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("4", buffer);
+ CHECK_EQ(0, strcmp("4", buffer));
bignum.AssignUInt16(10);
bignum.Square();
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("64", buffer);
+ CHECK_EQ(0, strcmp("64", buffer));
AssignHexString(&bignum, "FFFFFFF");
bignum.Square();
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFE0000001", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFE0000001", buffer));
AssignHexString(&bignum, "FFFFFFFFFFFFFF");
bignum.Square();
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FFFFFFFFFFFFFE00000000000001", buffer);
+ CHECK_EQ(0, strcmp("FFFFFFFFFFFFFE00000000000001", buffer));
}
@@ -1393,110 +1431,112 @@ TEST(AssignPowerUInt16) {
bignum.AssignPowerUInt16(1, 0);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignPowerUInt16(1, 1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignPowerUInt16(1, 2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignPowerUInt16(2, 0);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignPowerUInt16(2, 1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2", buffer);
+ CHECK_EQ(0, strcmp("2", buffer));
bignum.AssignPowerUInt16(2, 2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("4", buffer);
+ CHECK_EQ(0, strcmp("4", buffer));
bignum.AssignPowerUInt16(16, 1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10", buffer);
+ CHECK_EQ(0, strcmp("10", buffer));
bignum.AssignPowerUInt16(16, 2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100", buffer);
+ CHECK_EQ(0, strcmp("100", buffer));
bignum.AssignPowerUInt16(16, 5);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100000", buffer);
+ CHECK_EQ(0, strcmp("100000", buffer));
bignum.AssignPowerUInt16(16, 8);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("100000000", buffer);
+ CHECK_EQ(0, strcmp("100000000", buffer));
bignum.AssignPowerUInt16(16, 16);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000", buffer));
bignum.AssignPowerUInt16(16, 30);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1000000000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("1000000000000000000000000000000", buffer));
bignum.AssignPowerUInt16(10, 0);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignPowerUInt16(10, 1);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("A", buffer);
+ CHECK_EQ(0, strcmp("A", buffer));
bignum.AssignPowerUInt16(10, 2);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("64", buffer);
+ CHECK_EQ(0, strcmp("64", buffer));
bignum.AssignPowerUInt16(10, 5);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("186A0", buffer);
+ CHECK_EQ(0, strcmp("186A0", buffer));
bignum.AssignPowerUInt16(10, 8);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("5F5E100", buffer);
+ CHECK_EQ(0, strcmp("5F5E100", buffer));
bignum.AssignPowerUInt16(10, 16);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("2386F26FC10000", buffer);
+ CHECK_EQ(0, strcmp("2386F26FC10000", buffer));
bignum.AssignPowerUInt16(10, 30);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("C9F2C9CD04674EDEA40000000", buffer);
+ CHECK_EQ(0, strcmp("C9F2C9CD04674EDEA40000000", buffer));
bignum.AssignPowerUInt16(10, 31);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("7E37BE2022C0914B2680000000", buffer);
+ CHECK_EQ(0, strcmp("7E37BE2022C0914B2680000000", buffer));
bignum.AssignPowerUInt16(2, 0);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignPowerUInt16(2, 100);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("10000000000000000000000000", buffer);
+ CHECK_EQ(0, strcmp("10000000000000000000000000", buffer));
bignum.AssignPowerUInt16(17, 0);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1", buffer);
+ CHECK_EQ(0, strcmp("1", buffer));
bignum.AssignPowerUInt16(17, 99);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("1942BB9853FAD924A3D4DD92B89B940E0207BEF05DB9C26BC1B757"
- "80BE0C5A2C2990E02A681224F34ED68558CE4C6E33760931",
- buffer);
+ CHECK_EQ(0, strcmp(
+ "1942BB9853FAD924A3D4DD92B89B940E0207BEF05DB9C26BC1B757"
+ "80BE0C5A2C2990E02A681224F34ED68558CE4C6E33760931",
+ buffer));
bignum.AssignPowerUInt16(0xFFFF, 99);
CHECK(bignum.ToHexString(buffer, kBufferSize));
- CHECK_EQ("FF9D12F09B886C54E77E7439C7D2DED2D34F669654C0C2B6B8C288250"
- "5A2211D0E3DC9A61831349EAE674B11D56E3049D7BD79DAAD6C9FA2BA"
- "528E3A794299F2EE9146A324DAFE3E88967A0358233B543E233E575B9"
- "DD4E3AA7942146426C328FF55BFD5C45E0901B1629260AF9AE2F310C5"
- "50959FAF305C30116D537D80CF6EBDBC15C5694062AF1AC3D956D0A41"
- "B7E1B79FF11E21D83387A1CE1F5882B31E4B5D8DE415BDBE6854466DF"
- "343362267A7E8833119D31D02E18DB5B0E8F6A64B0ED0D0062FFFF",
- buffer);
+ CHECK_EQ(0, strcmp(
+ "FF9D12F09B886C54E77E7439C7D2DED2D34F669654C0C2B6B8C288250"
+ "5A2211D0E3DC9A61831349EAE674B11D56E3049D7BD79DAAD6C9FA2BA"
+ "528E3A794299F2EE9146A324DAFE3E88967A0358233B543E233E575B9"
+ "DD4E3AA7942146426C328FF55BFD5C45E0901B1629260AF9AE2F310C5"
+ "50959FAF305C30116D537D80CF6EBDBC15C5694062AF1AC3D956D0A41"
+ "B7E1B79FF11E21D83387A1CE1F5882B31E4B5D8DE415BDBE6854466DF"
+ "343362267A7E8833119D31D02E18DB5B0E8F6A64B0ED0D0062FFFF",
+ buffer));
}
diff --git a/deps/v8/test/cctest/test-bit-vector.cc b/deps/v8/test/cctest/test-bit-vector.cc
index ac00fabb92..e8571d965e 100644
--- a/deps/v8/test/cctest/test-bit-vector.cc
+++ b/deps/v8/test/cctest/test-bit-vector.cc
@@ -35,7 +35,7 @@
using namespace v8::internal;
TEST(BitVector) {
- Zone zone(CcTest::i_isolate());
+ Zone zone;
{
BitVector v(15, &zone);
v.Add(1);
diff --git a/deps/v8/test/cctest/test-checks.cc b/deps/v8/test/cctest/test-checks.cc
deleted file mode 100644
index 79e87ddd0c..0000000000
--- a/deps/v8/test/cctest/test-checks.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/checks.h"
-
-#include "test/cctest/cctest.h"
-
-
-TEST(CheckEqualsZeroAndMinusZero) {
- CHECK_EQ(0.0, 0.0);
- CHECK_NE(0.0, -0.0);
- CHECK_NE(-0.0, 0.0);
- CHECK_EQ(-0.0, -0.0);
-}
-
-
-TEST(CheckEqualsReflexivity) {
- double inf = V8_INFINITY;
- double nan = v8::base::OS::nan_value();
- double constants[] = {-nan, -inf, -3.1415, -1.0, -0.1, -0.0,
- 0.0, 0.1, 1.0, 3.1415, inf, nan};
- for (size_t i = 0; i < arraysize(constants); ++i) {
- CHECK_EQ(constants[i], constants[i]);
- }
-}
diff --git a/deps/v8/test/cctest/test-circular-queue.cc b/deps/v8/test/cctest/test-circular-queue.cc
index 736a9b7c88..8d0d4f982a 100644
--- a/deps/v8/test/cctest/test-circular-queue.cc
+++ b/deps/v8/test/cctest/test-circular-queue.cc
@@ -42,51 +42,51 @@ TEST(SamplingCircularQueue) {
// Check that we are using non-reserved values.
// Fill up the first chunk.
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
*rec = i;
scq.FinishEnqueue();
}
// The queue is full, enqueue is not allowed.
- CHECK_EQ(NULL, scq.StartEnqueue());
+ CHECK(!scq.StartEnqueue());
// Try to enqueue when the the queue is full. Consumption must be available.
- CHECK_NE(NULL, scq.Peek());
+ CHECK(scq.Peek());
for (int i = 0; i < 10; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
- CHECK_EQ(NULL, rec);
- CHECK_NE(NULL, scq.Peek());
+ CHECK(!rec);
+ CHECK(scq.Peek());
}
// Consume all records.
for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.Peek());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
scq.Remove();
CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
}
// The queue is empty.
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
*rec = i;
scq.FinishEnqueue();
}
// Consume all available kMaxRecordsInQueue / 2 records.
- CHECK_NE(NULL, scq.Peek());
+ CHECK(scq.Peek());
for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.Peek());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
scq.Remove();
@@ -94,7 +94,7 @@ TEST(SamplingCircularQueue) {
}
// The queue is empty.
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
}
@@ -116,7 +116,7 @@ class ProducerThread: public v8::base::Thread {
virtual void Run() {
for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
Record* rec = reinterpret_cast<Record*>(scq_->StartEnqueue());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
*rec = i;
scq_->FinishEnqueue();
}
@@ -147,41 +147,41 @@ TEST(SamplingCircularQueueMultithreading) {
ProducerThread producer2(&scq, kRecordsPerChunk, 10, &semaphore);
ProducerThread producer3(&scq, kRecordsPerChunk, 20, &semaphore);
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
producer1.Start();
semaphore.Wait();
for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.Peek());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
scq.Remove();
CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
}
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
producer2.Start();
semaphore.Wait();
for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.Peek());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
scq.Remove();
CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
}
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
producer3.Start();
semaphore.Wait();
for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.Peek());
- CHECK_NE(NULL, rec);
+ CHECK(rec);
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
CHECK_EQ(rec, reinterpret_cast<Record*>(scq.Peek()));
scq.Remove();
CHECK_NE(rec, reinterpret_cast<Record*>(scq.Peek()));
}
- CHECK_EQ(NULL, scq.Peek());
+ CHECK(!scq.Peek());
}
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index 95035aab0b..c8c48ecc65 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -77,10 +77,9 @@ int STDCALL ConvertDToICVersion(double d) {
void RunOneTruncationTestWithTest(ConvertDToICallWrapper callWrapper,
ConvertDToIFunc func,
double from,
- double raw) {
- uint64_t to = static_cast<int64_t>(raw);
- int result = (*callWrapper)(func, from);
- CHECK_EQ(static_cast<int>(to), result);
+ int32_t to) {
+ int32_t result = (*callWrapper)(func, from);
+ CHECK_EQ(to, result);
}
@@ -92,7 +91,7 @@ int32_t DefaultCallWrapper(ConvertDToIFunc func,
// #define NaN and Infinity so that it's possible to cut-and-paste these tests
// directly to a .js file and run them.
-#define NaN (v8::base::OS::nan_value())
+#define NaN (std::numeric_limits<double>::quiet_NaN())
#define Infinity (std::numeric_limits<double>::infinity())
#define RunOneTruncationTest(p1, p2) \
RunOneTruncationTestWithTest(callWrapper, func, p1, p2)
@@ -123,15 +122,15 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(-0.9999999999999999, 0);
RunOneTruncationTest(4294967296.0, 0);
RunOneTruncationTest(-4294967296.0, 0);
- RunOneTruncationTest(9223372036854775000.0, 4294966272.0);
- RunOneTruncationTest(-9223372036854775000.0, -4294966272.0);
+ RunOneTruncationTest(9223372036854775000.0, -1024);
+ RunOneTruncationTest(-9223372036854775000.0, 1024);
RunOneTruncationTest(4.5036e+15, 372629504);
RunOneTruncationTest(-4.5036e+15, -372629504);
RunOneTruncationTest(287524199.5377777, 0x11234567);
RunOneTruncationTest(-287524199.5377777, -0x11234567);
- RunOneTruncationTest(2300193596.302222, 2300193596.0);
- RunOneTruncationTest(-2300193596.302222, -2300193596.0);
+ RunOneTruncationTest(2300193596.302222, -1994773700);
+ RunOneTruncationTest(-2300193596.302222, 1994773700);
RunOneTruncationTest(4600387192.604444, 305419896);
RunOneTruncationTest(-4600387192.604444, -305419896);
RunOneTruncationTest(4823855600872397.0, 1737075661);
@@ -154,14 +153,14 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
- RunOneTruncationTest(2147483647.0, 2147483647.0);
- RunOneTruncationTest(-2147483648.0, -2147483648.0);
- RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
- RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
- RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
- RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
- RunOneTruncationTest(1.9342813113834065e+25, 2147483648.0);
- RunOneTruncationTest(-1.9342813113834065e+25, 2147483648.0);
+ RunOneTruncationTest(2147483647.0, 2147483647);
+ RunOneTruncationTest(-2147483648.0, -2147483647-1);
+ RunOneTruncationTest(9.6714111686030497e+24, -2147483647-1);
+ RunOneTruncationTest(-9.6714111686030497e+24, -2147483647-1);
+ RunOneTruncationTest(9.6714157802890681e+24, -2147483647-1);
+ RunOneTruncationTest(-9.6714157802890681e+24, -2147483647-1);
+ RunOneTruncationTest(1.9342813113834065e+25, -2147483647-1);
+ RunOneTruncationTest(-1.9342813113834065e+25, -2147483647-1);
RunOneTruncationTest(3.868562622766813e+25, 0);
RunOneTruncationTest(-3.868562622766813e+25, 0);
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index a05231e534..faf533239e 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -60,9 +60,9 @@ static Handle<JSFunction> Compile(const char* source) {
Handle<String> source_code = isolate->factory()->NewStringFromUtf8(
CStrVector(source)).ToHandleChecked();
Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
- source_code, Handle<String>(), 0, 0, false,
+ source_code, Handle<String>(), 0, 0, false, false,
Handle<Context>(isolate->native_context()), NULL, NULL,
- v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
+ v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE, false);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_function, isolate->native_context());
}
@@ -314,7 +314,9 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
CHECK_EQ(expected_slots, feedback_vector->Slots());
CHECK_EQ(expected_ic_slots, feedback_vector->ICSlots());
FeedbackVectorICSlot slot_for_a(0);
- CHECK(feedback_vector->Get(slot_for_a)->IsJSFunction());
+ Object* object = feedback_vector->Get(slot_for_a);
+ CHECK(object->IsWeakCell() &&
+ WeakCell::cast(object)->value()->IsJSFunction());
CompileRun("%OptimizeFunctionOnNextCall(f); f(fun1);");
@@ -322,7 +324,9 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
// of the full code.
CHECK(f->IsOptimized());
CHECK(f->shared()->has_deoptimization_support());
- CHECK(f->shared()->feedback_vector()->Get(slot_for_a)->IsJSFunction());
+ object = f->shared()->feedback_vector()->Get(slot_for_a);
+ CHECK(object->IsWeakCell() &&
+ WeakCell::cast(object)->value()->IsJSFunction());
}
@@ -400,6 +404,116 @@ TEST(OptimizedCodeSharing) {
}
+TEST(CompileFunctionInContext) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ CompileRun("var r = 10;");
+ v8::Local<v8::Object> math =
+ v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("Math")));
+ v8::ScriptCompiler::Source script_source(v8_str(
+ "a = PI * r * r;"
+ "x = r * cos(PI);"
+ "y = r * sin(PI / 2);"));
+ v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
+ CcTest::isolate(), &script_source, env.local(), 0, NULL, 1, &math);
+ CHECK(!fun.IsEmpty());
+ fun->Call(env->Global(), 0, NULL);
+ CHECK(env->Global()->Has(v8_str("a")));
+ v8::Local<v8::Value> a = env->Global()->Get(v8_str("a"));
+ CHECK(a->IsNumber());
+ CHECK(env->Global()->Has(v8_str("x")));
+ v8::Local<v8::Value> x = env->Global()->Get(v8_str("x"));
+ CHECK(x->IsNumber());
+ CHECK(env->Global()->Has(v8_str("y")));
+ v8::Local<v8::Value> y = env->Global()->Get(v8_str("y"));
+ CHECK(y->IsNumber());
+ CHECK_EQ(314.1592653589793, a->NumberValue());
+ CHECK_EQ(-10.0, x->NumberValue());
+ CHECK_EQ(10.0, y->NumberValue());
+}
+
+
+TEST(CompileFunctionInContextComplex) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ CompileRun(
+ "var x = 1;"
+ "var y = 2;"
+ "var z = 4;"
+ "var a = {x: 8, y: 16};"
+ "var b = {x: 32};");
+ v8::Local<v8::Object> ext[2];
+ ext[0] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
+ ext[1] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("b")));
+ v8::ScriptCompiler::Source script_source(v8_str("result = x + y + z"));
+ v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
+ CcTest::isolate(), &script_source, env.local(), 0, NULL, 2, ext);
+ CHECK(!fun.IsEmpty());
+ fun->Call(env->Global(), 0, NULL);
+ CHECK(env->Global()->Has(v8_str("result")));
+ v8::Local<v8::Value> result = env->Global()->Get(v8_str("result"));
+ CHECK(result->IsNumber());
+ CHECK_EQ(52.0, result->NumberValue());
+}
+
+
+TEST(CompileFunctionInContextArgs) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ CompileRun("var a = {x: 23};");
+ v8::Local<v8::Object> ext[1];
+ ext[0] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
+ v8::ScriptCompiler::Source script_source(v8_str("result = x + b"));
+ v8::Local<v8::String> arg = v8_str("b");
+ v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
+ CcTest::isolate(), &script_source, env.local(), 1, &arg, 1, ext);
+ CHECK(!fun.IsEmpty());
+ v8::Local<v8::Value> b_value = v8::Number::New(CcTest::isolate(), 42.0);
+ fun->Call(env->Global(), 1, &b_value);
+ CHECK(env->Global()->Has(v8_str("result")));
+ v8::Local<v8::Value> result = env->Global()->Get(v8_str("result"));
+ CHECK(result->IsNumber());
+ CHECK_EQ(65.0, result->NumberValue());
+}
+
+
+TEST(CompileFunctionInContextComments) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ CompileRun("var a = {x: 23, y: 1, z: 2};");
+ v8::Local<v8::Object> ext[1];
+ ext[0] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
+ v8::ScriptCompiler::Source script_source(
+ v8_str("result = /* y + */ x + b // + z"));
+ v8::Local<v8::String> arg = v8_str("b");
+ v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
+ CcTest::isolate(), &script_source, env.local(), 1, &arg, 1, ext);
+ CHECK(!fun.IsEmpty());
+ v8::Local<v8::Value> b_value = v8::Number::New(CcTest::isolate(), 42.0);
+ fun->Call(env->Global(), 1, &b_value);
+ CHECK(env->Global()->Has(v8_str("result")));
+ v8::Local<v8::Value> result = env->Global()->Get(v8_str("result"));
+ CHECK(result->IsNumber());
+ CHECK_EQ(65.0, result->NumberValue());
+}
+
+
+TEST(CompileFunctionInContextNonIdentifierArgs) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::ScriptCompiler::Source script_source(v8_str("result = 1"));
+ v8::Local<v8::String> arg = v8_str("b }");
+ v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
+ CcTest::isolate(), &script_source, env.local(), 1, &arg, 0, NULL);
+ CHECK(fun.IsEmpty());
+}
+
+
#ifdef ENABLE_DISASSEMBLER
static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
const char* property_name) {
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 93bed7f4de..b7881edcf6 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -172,12 +172,12 @@ TEST(TrailingJunk) {
TEST(NonStrDecimalLiteral) {
UnicodeCache uc;
- CHECK(std::isnan(
- StringToDouble(&uc, " ", NO_FLAGS, v8::base::OS::nan_value())));
- CHECK(
- std::isnan(StringToDouble(&uc, "", NO_FLAGS, v8::base::OS::nan_value())));
- CHECK(std::isnan(
- StringToDouble(&uc, " ", NO_FLAGS, v8::base::OS::nan_value())));
+ CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS,
+ std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(StringToDouble(&uc, "", NO_FLAGS,
+ std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS,
+ std::numeric_limits<double>::quiet_NaN())));
CHECK_EQ(0.0, StringToDouble(&uc, "", NO_FLAGS));
CHECK_EQ(0.0, StringToDouble(&uc, " ", NO_FLAGS));
}
@@ -318,7 +318,7 @@ TEST(BitField) {
// One bit bit field can hold values 0 and 1.
CHECK(!OneBit1::is_valid(static_cast<uint32_t>(-1)));
CHECK(!OneBit2::is_valid(static_cast<uint32_t>(-1)));
- for (int i = 0; i < 2; i++) {
+ for (unsigned i = 0; i < 2; i++) {
CHECK(OneBit1::is_valid(i));
x = OneBit1::encode(i);
CHECK_EQ(i, OneBit1::decode(x));
@@ -333,7 +333,7 @@ TEST(BitField) {
// Eight bit bit field can hold values from 0 tp 255.
CHECK(!EightBit1::is_valid(static_cast<uint32_t>(-1)));
CHECK(!EightBit2::is_valid(static_cast<uint32_t>(-1)));
- for (int i = 0; i < 256; i++) {
+ for (unsigned i = 0; i < 256; i++) {
CHECK(EightBit1::is_valid(i));
x = EightBit1::encode(i);
CHECK_EQ(i, EightBit1::decode(x));
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 0e2dd91219..e2b6db0b96 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -32,6 +32,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/platform.h"
#include "src/cpu-profiler-inl.h"
+#include "src/deoptimizer.h"
#include "src/smart-pointers.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -49,6 +50,12 @@ using i::SmartPointer;
using i::Vector;
+// Helper methods
+static v8::Local<v8::Function> GetFunction(v8::Context* env, const char* name) {
+ return v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str(name)));
+}
+
+
TEST(StartStop) {
i::Isolate* isolate = CcTest::i_isolate();
CpuProfilesCollection profiles(isolate->heap());
@@ -114,9 +121,9 @@ i::Code* CreateCode(LocalContext* env) {
"}\n"
"%s();\n", name_start, counter, name_start, name_start);
CompileRun(script.start());
- i::Handle<i::JSFunction> fun = v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(
- (*env)->Global()->Get(v8_str(name_start))));
+
+ i::Handle<i::JSFunction> fun =
+ v8::Utils::OpenHandle(*GetFunction(**env, name_start));
return fun->code();
}
@@ -164,22 +171,22 @@ TEST(CodeEvents) {
// Check the state of profile generator.
CodeEntry* aaa = generator.code_map()->FindEntry(aaa_code->address());
- CHECK_NE(NULL, aaa);
- CHECK_EQ(aaa_str, aaa->name());
+ CHECK(aaa);
+ CHECK_EQ(0, strcmp(aaa_str, aaa->name()));
CodeEntry* comment = generator.code_map()->FindEntry(comment_code->address());
- CHECK_NE(NULL, comment);
- CHECK_EQ("comment", comment->name());
+ CHECK(comment);
+ CHECK_EQ(0, strcmp("comment", comment->name()));
CodeEntry* args5 = generator.code_map()->FindEntry(args5_code->address());
- CHECK_NE(NULL, args5);
- CHECK_EQ("5", args5->name());
+ CHECK(args5);
+ CHECK_EQ(0, strcmp("5", args5->name()));
- CHECK_EQ(NULL, generator.code_map()->FindEntry(comment2_code->address()));
+ CHECK(!generator.code_map()->FindEntry(comment2_code->address()));
CodeEntry* comment2 = generator.code_map()->FindEntry(moved_code->address());
- CHECK_NE(NULL, comment2);
- CHECK_EQ("comment2", comment2->name());
+ CHECK(comment2);
+ CHECK_EQ(0, strcmp("comment2", comment2->name()));
}
@@ -224,21 +231,21 @@ TEST(TickEvents) {
processor->StopSynchronously();
CpuProfile* profile = profiles->StopProfiling("");
- CHECK_NE(NULL, profile);
+ CHECK(profile);
// Check call trees.
const i::List<ProfileNode*>* top_down_root_children =
profile->top_down()->root()->children();
CHECK_EQ(1, top_down_root_children->length());
- CHECK_EQ("bbb", top_down_root_children->last()->entry()->name());
+ CHECK_EQ(0, strcmp("bbb", top_down_root_children->last()->entry()->name()));
const i::List<ProfileNode*>* top_down_bbb_children =
top_down_root_children->last()->children();
CHECK_EQ(1, top_down_bbb_children->length());
- CHECK_EQ("5", top_down_bbb_children->last()->entry()->name());
+ CHECK_EQ(0, strcmp("5", top_down_bbb_children->last()->entry()->name()));
const i::List<ProfileNode*>* top_down_stub_children =
top_down_bbb_children->last()->children();
CHECK_EQ(1, top_down_stub_children->length());
- CHECK_EQ("ddd", top_down_stub_children->last()->entry()->name());
+ CHECK_EQ(0, strcmp("ddd", top_down_stub_children->last()->entry()->name()));
const i::List<ProfileNode*>* top_down_ddd_children =
top_down_stub_children->last()->children();
CHECK_EQ(0, top_down_ddd_children->length());
@@ -289,9 +296,9 @@ TEST(Issue1398) {
processor->StopSynchronously();
CpuProfile* profile = profiles->StopProfiling("");
- CHECK_NE(NULL, profile);
+ CHECK(profile);
- int actual_depth = 0;
+ unsigned actual_depth = 0;
const ProfileNode* node = profile->top_down()->root();
while (node->children()->length() > 0) {
node = node->children()->last();
@@ -353,25 +360,25 @@ TEST(DeleteCpuProfile) {
i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(cpu_profiler);
CHECK_EQ(0, iprofiler->GetProfilesCount());
- v8::Local<v8::String> name1 = v8::String::NewFromUtf8(env->GetIsolate(), "1");
+ v8::Local<v8::String> name1 = v8_str("1");
cpu_profiler->StartProfiling(name1);
v8::CpuProfile* p1 = cpu_profiler->StopProfiling(name1);
- CHECK_NE(NULL, p1);
+ CHECK(p1);
CHECK_EQ(1, iprofiler->GetProfilesCount());
CHECK(FindCpuProfile(cpu_profiler, p1));
p1->Delete();
CHECK_EQ(0, iprofiler->GetProfilesCount());
- v8::Local<v8::String> name2 = v8::String::NewFromUtf8(env->GetIsolate(), "2");
+ v8::Local<v8::String> name2 = v8_str("2");
cpu_profiler->StartProfiling(name2);
v8::CpuProfile* p2 = cpu_profiler->StopProfiling(name2);
- CHECK_NE(NULL, p2);
+ CHECK(p2);
CHECK_EQ(1, iprofiler->GetProfilesCount());
CHECK(FindCpuProfile(cpu_profiler, p2));
- v8::Local<v8::String> name3 = v8::String::NewFromUtf8(env->GetIsolate(), "3");
+ v8::Local<v8::String> name3 = v8_str("3");
cpu_profiler->StartProfiling(name3);
v8::CpuProfile* p3 = cpu_profiler->StopProfiling(name3);
- CHECK_NE(NULL, p3);
+ CHECK(p3);
CHECK_EQ(2, iprofiler->GetProfilesCount());
CHECK_NE(p2, p3);
CHECK(FindCpuProfile(cpu_profiler, p3));
@@ -390,8 +397,7 @@ TEST(ProfileStartEndTime) {
v8::HandleScope scope(env->GetIsolate());
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name =
- v8::String::NewFromUtf8(env->GetIsolate(), "test");
+ v8::Local<v8::String> profile_name = v8_str("test");
cpu_profiler->StartProfiling(profile_name);
const v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
CHECK(profile->GetStartTime() <= profile->GetEndTime());
@@ -403,8 +409,7 @@ static v8::CpuProfile* RunProfiler(
v8::Handle<v8::Value> argv[], int argc,
unsigned min_js_samples, bool collect_samples = false) {
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name =
- v8::String::NewFromUtf8(env->GetIsolate(), "my_profile");
+ v8::Local<v8::String> profile_name = v8_str("my_profile");
cpu_profiler->StartProfiling(profile_name, collect_samples);
@@ -417,7 +422,7 @@ static v8::CpuProfile* RunProfiler(
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
- CHECK_NE(NULL, profile);
+ CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
reinterpret_cast<i::CpuProfile*>(profile)->Print();
@@ -444,7 +449,7 @@ static void CheckChildrenNames(const v8::CpuProfileNode* node,
// Check that there are no duplicates.
for (int j = 0; j < count; j++) {
if (j == i) continue;
- CHECK_NE(name, node->GetChild(j)->GetFunctionName());
+ CHECK(!name->Equals(node->GetChild(j)->GetFunctionName()));
}
}
}
@@ -454,7 +459,7 @@ static const v8::CpuProfileNode* FindChild(v8::Isolate* isolate,
const v8::CpuProfileNode* node,
const char* name) {
int count = node->GetChildrenCount();
- v8::Handle<v8::String> nameHandle = v8::String::NewFromUtf8(isolate, name);
+ v8::Handle<v8::String> nameHandle = v8_str(name);
for (int i = 0; i < count; i++) {
const v8::CpuProfileNode* child = node->GetChild(i);
if (nameHandle->Equals(child->GetFunctionName())) return child;
@@ -489,6 +494,17 @@ static void CheckSimpleBranch(v8::Isolate* isolate,
}
+static const v8::CpuProfileNode* GetSimpleBranch(v8::Isolate* isolate,
+ const v8::CpuProfileNode* node,
+ const char* names[],
+ int length) {
+ for (int i = 0; i < length; i++) {
+ node = GetChild(isolate, node, names[i]);
+ }
+ return node;
+}
+
+
static const char* cpu_profiler_test_source = "function loop(timeout) {\n"
" this.mmm = 0;\n"
" var start = Date.now();\n"
@@ -542,10 +558,8 @@ TEST(CollectCpuProfile) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- cpu_profiler_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(cpu_profiler_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t profiling_interval_ms = 200;
v8::Handle<v8::Value> args[] = {
@@ -558,11 +572,9 @@ TEST(CollectCpuProfile) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
CheckChildrenNames(root, names);
const v8::CpuProfileNode* startNode =
@@ -619,11 +631,8 @@ TEST(HotDeoptNoFrameEntry) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::NewFromUtf8(
- env->GetIsolate(),
- hot_deopt_no_frame_entry_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(hot_deopt_no_frame_entry_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t profiling_interval_ms = 200;
v8::Handle<v8::Value> args[] = {
@@ -636,11 +645,9 @@ TEST(HotDeoptNoFrameEntry) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
CheckChildrenNames(root, names);
const v8::CpuProfileNode* startNode =
@@ -657,10 +664,8 @@ TEST(CollectCpuProfileSamples) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- cpu_profiler_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(cpu_profiler_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t profiling_interval_ms = 200;
v8::Handle<v8::Value> args[] = {
@@ -674,7 +679,7 @@ TEST(CollectCpuProfileSamples) {
uint64_t current_time = profile->GetStartTime();
CHECK_LE(current_time, end_time);
for (int i = 0; i < profile->GetSamplesCount(); i++) {
- CHECK_NE(NULL, profile->GetSample(i));
+ CHECK(profile->GetSample(i));
uint64_t timestamp = profile->GetSampleTimestamp(i);
CHECK_LE(current_time, timestamp);
CHECK_LE(timestamp, end_time);
@@ -709,10 +714,8 @@ TEST(SampleWhenFrameIsNotSetup) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(v8::String::NewFromUtf8(
- env->GetIsolate(), cpu_profiler_test_source2))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(cpu_profiler_test_source2);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t repeat_count = 100;
#if defined(USE_SIMULATOR)
@@ -728,11 +731,9 @@ TEST(SampleWhenFrameIsNotSetup) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
CheckChildrenNames(root, names);
const v8::CpuProfileNode* startNode =
@@ -826,19 +827,14 @@ TEST(NativeAccessorUninitializedIC) {
TestApiCallbacks accessors(100);
v8::Local<v8::External> data =
v8::External::New(isolate, &accessors);
- instance_template->SetAccessor(
- v8::String::NewFromUtf8(isolate, "foo"),
- &TestApiCallbacks::Getter, &TestApiCallbacks::Setter, data);
+ instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
+ &TestApiCallbacks::Setter, data);
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
- instance);
+ env->Global()->Set(v8_str("instance"), instance);
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, native_accessor_test_source))
- ->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
+ CompileRun(native_accessor_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t repeat_count = 1;
v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
@@ -871,19 +867,14 @@ TEST(NativeAccessorMonomorphicIC) {
TestApiCallbacks accessors(1);
v8::Local<v8::External> data =
v8::External::New(isolate, &accessors);
- instance_template->SetAccessor(
- v8::String::NewFromUtf8(isolate, "foo"),
- &TestApiCallbacks::Getter, &TestApiCallbacks::Setter, data);
+ instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
+ &TestApiCallbacks::Setter, data);
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
- instance);
+ env->Global()->Set(v8_str("instance"), instance);
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, native_accessor_test_source))
- ->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
+ CompileRun(native_accessor_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
{
// Make sure accessors ICs are in monomorphic state before starting
@@ -930,26 +921,22 @@ TEST(NativeMethodUninitializedIC) {
v8::Local<v8::FunctionTemplate> func_template =
v8::FunctionTemplate::New(isolate);
- func_template->SetClassName(
- v8::String::NewFromUtf8(isolate, "Test_InstanceCostructor"));
+ func_template->SetClassName(v8_str("Test_InstanceCostructor"));
v8::Local<v8::ObjectTemplate> proto_template =
func_template->PrototypeTemplate();
v8::Local<v8::Signature> signature =
v8::Signature::New(isolate, func_template);
- proto_template->Set(v8::String::NewFromUtf8(isolate, "fooMethod"),
- v8::FunctionTemplate::New(isolate,
- &TestApiCallbacks::Callback,
- data, signature, 0));
+ proto_template->Set(
+ v8_str("fooMethod"),
+ v8::FunctionTemplate::New(isolate, &TestApiCallbacks::Callback, data,
+ signature, 0));
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
- instance);
+ env->Global()->Set(v8_str("instance"), instance);
- v8::Script::Compile(v8::String::NewFromUtf8(
- isolate, native_method_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
+ CompileRun(native_method_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t repeat_count = 1;
v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
@@ -976,26 +963,22 @@ TEST(NativeMethodMonomorphicIC) {
v8::Local<v8::FunctionTemplate> func_template =
v8::FunctionTemplate::New(isolate);
- func_template->SetClassName(
- v8::String::NewFromUtf8(isolate, "Test_InstanceCostructor"));
+ func_template->SetClassName(v8_str("Test_InstanceCostructor"));
v8::Local<v8::ObjectTemplate> proto_template =
func_template->PrototypeTemplate();
v8::Local<v8::Signature> signature =
v8::Signature::New(isolate, func_template);
- proto_template->Set(v8::String::NewFromUtf8(isolate, "fooMethod"),
- v8::FunctionTemplate::New(isolate,
- &TestApiCallbacks::Callback,
- data, signature, 0));
+ proto_template->Set(
+ v8_str("fooMethod"),
+ v8::FunctionTemplate::New(isolate, &TestApiCallbacks::Callback, data,
+ signature, 0));
v8::Local<v8::Function> func = func_template->GetFunction();
v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
- instance);
+ env->Global()->Set(v8_str("instance"), instance);
- v8::Script::Compile(v8::String::NewFromUtf8(
- isolate, native_method_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "start")));
+ CompileRun(native_method_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
{
// Make sure method ICs are in monomorphic state before starting
// profiling.
@@ -1038,21 +1021,16 @@ TEST(BoundFunctionCall) {
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), bound_function_test_source))
- ->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(bound_function_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
// Don't allow |foo| node to be at the top level.
CheckChildrenNames(root, names);
@@ -1090,10 +1068,10 @@ TEST(TickLines) {
CompileRun(script.start());
- i::Handle<i::JSFunction> func = v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast((*env)->Global()->Get(v8_str(func_name))));
- CHECK_NE(NULL, func->shared());
- CHECK_NE(NULL, func->shared()->code());
+ i::Handle<i::JSFunction> func =
+ v8::Utils::OpenHandle(*GetFunction(*env, func_name));
+ CHECK(func->shared());
+ CHECK(func->shared()->code());
i::Code* code = NULL;
if (func->code()->is_optimized_code()) {
code = func->code();
@@ -1101,9 +1079,9 @@ TEST(TickLines) {
CHECK(func->shared()->code() == func->code() || !i::FLAG_crankshaft);
code = func->shared()->code();
}
- CHECK_NE(NULL, code);
+ CHECK(code);
i::Address code_address = code->instruction_start();
- CHECK_NE(NULL, code_address);
+ CHECK(code_address);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
profiles->StartProfiling("", false);
@@ -1126,22 +1104,22 @@ TEST(TickLines) {
processor->StopSynchronously();
CpuProfile* profile = profiles->StopProfiling("");
- CHECK_NE(NULL, profile);
+ CHECK(profile);
// Check the state of profile generator.
CodeEntry* func_entry = generator.code_map()->FindEntry(code_address);
- CHECK_NE(NULL, func_entry);
- CHECK_EQ(func_name, func_entry->name());
+ CHECK(func_entry);
+ CHECK_EQ(0, strcmp(func_name, func_entry->name()));
const i::JITLineInfoTable* line_info = func_entry->line_info();
- CHECK_NE(NULL, line_info);
+ CHECK(line_info);
CHECK(!line_info->empty());
// Check the hit source lines using V8 Public APIs.
const i::ProfileTree* tree = profile->top_down();
ProfileNode* root = tree->root();
- CHECK_NE(NULL, root);
+ CHECK(root);
ProfileNode* func_node = root->FindChild(func_entry);
- CHECK_NE(NULL, func_node);
+ CHECK(func_node);
// Add 10 faked ticks to source line #5.
int hit_line = 5;
@@ -1149,7 +1127,7 @@ TEST(TickLines) {
for (int i = 0; i < hit_count; i++) func_node->IncrementLineTicks(hit_line);
unsigned int line_count = func_node->GetHitLineCount();
- CHECK_EQ(2, line_count); // Expect two hit source lines - #1 and #5.
+ CHECK_EQ(2u, line_count); // Expect two hit source lines - #1 and #5.
ScopedVector<v8::CpuProfileNode::LineTick> entries(line_count);
CHECK(func_node->GetLineTicks(&entries[0], line_count));
int value = 0;
@@ -1190,13 +1168,12 @@ TEST(FunctionCallSample) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- // Collect garbage that might have be generated while installing extensions.
+ // Collect garbage that might have be generated while installing
+ // extensions.
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- v8::Script::Compile(v8::String::NewFromUtf8(
- env->GetIsolate(), call_function_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(call_function_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t duration_ms = 100;
v8::Handle<v8::Value> args[] = {
@@ -1208,13 +1185,10 @@ TEST(FunctionCallSample) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
ScopedVector<v8::Handle<v8::String> > names(4);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
- names[3] = v8::String::NewFromUtf8(
- env->GetIsolate(), i::ProfileGenerator::kUnresolvedFunctionName);
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
+ names[3] = v8_str(i::ProfileGenerator::kUnresolvedFunctionName);
// Don't allow |bar| and |call| nodes to be at the top level.
CheckChildrenNames(root, names);
}
@@ -1228,8 +1202,8 @@ TEST(FunctionCallSample) {
CHECK(is_gc_stress_testing || startNode);
if (startNode) {
ScopedVector<v8::Handle<v8::String> > names(2);
- names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "bar");
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(), "call");
+ names[0] = v8_str("bar");
+ names[1] = v8_str("call");
CheckChildrenNames(startNode, names);
}
@@ -1237,7 +1211,7 @@ TEST(FunctionCallSample) {
env->GetIsolate(), root, i::ProfileGenerator::kUnresolvedFunctionName);
if (unresolvedNode) {
ScopedVector<v8::Handle<v8::String> > names(1);
- names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "call");
+ names[0] = v8_str("call");
CheckChildrenNames(unresolvedNode, names);
}
@@ -1245,19 +1219,20 @@ TEST(FunctionCallSample) {
}
-static const char* function_apply_test_source = "function bar(iterations) {\n"
-"}\n"
-"function test() {\n"
-" bar.apply(this, [10 * 1000]);\n"
-"}\n"
-"function start(duration) {\n"
-" var start = Date.now();\n"
-" while (Date.now() - start < duration) {\n"
-" try {\n"
-" test();\n"
-" } catch(e) {}\n"
-" }\n"
-"}";
+static const char* function_apply_test_source =
+ "function bar(iterations) {\n"
+ "}\n"
+ "function test() {\n"
+ " bar.apply(this, [10 * 1000]);\n"
+ "}\n"
+ "function start(duration) {\n"
+ " var start = Date.now();\n"
+ " while (Date.now() - start < duration) {\n"
+ " try {\n"
+ " test();\n"
+ " } catch(e) {}\n"
+ " }\n"
+ "}";
// [Top down]:
@@ -1274,11 +1249,8 @@ TEST(FunctionApplySample) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), function_apply_test_source))
- ->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(function_apply_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
int32_t duration_ms = 100;
v8::Handle<v8::Value> args[] = {
@@ -1291,11 +1263,9 @@ TEST(FunctionApplySample) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
// Don't allow |test|, |bar| and |apply| nodes to be at the top level.
CheckChildrenNames(root, names);
}
@@ -1305,9 +1275,8 @@ TEST(FunctionApplySample) {
if (startNode) {
{
ScopedVector<v8::Handle<v8::String> > names(2);
- names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "test");
- names[1] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kUnresolvedFunctionName);
+ names[0] = v8_str("test");
+ names[1] = v8_str(ProfileGenerator::kUnresolvedFunctionName);
CheckChildrenNames(startNode, names);
}
@@ -1315,11 +1284,11 @@ TEST(FunctionApplySample) {
FindChild(env->GetIsolate(), startNode, "test");
if (testNode) {
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "bar");
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(), "apply");
+ names[0] = v8_str("bar");
+ names[1] = v8_str("apply");
// apply calls "get length" before invoking the function itself
// and we may get hit into it.
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "get length");
+ names[2] = v8_str("get length");
CheckChildrenNames(testNode, names);
}
@@ -1327,7 +1296,7 @@ TEST(FunctionApplySample) {
FindChild(env->GetIsolate(), startNode,
ProfileGenerator::kUnresolvedFunctionName)) {
ScopedVector<v8::Handle<v8::String> > names(1);
- names[0] = v8::String::NewFromUtf8(env->GetIsolate(), "apply");
+ names[0] = v8_str("apply");
CheckChildrenNames(unresolvedNode, names);
GetChild(env->GetIsolate(), unresolvedNode, "apply");
}
@@ -1365,28 +1334,23 @@ TEST(CpuProfileDeepStack) {
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Script::Compile(v8::String::NewFromUtf8(
- env->GetIsolate(), cpu_profiler_deep_stack_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(cpu_profiler_deep_stack_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name =
- v8::String::NewFromUtf8(env->GetIsolate(), "my_profile");
+ v8::Local<v8::String> profile_name = v8_str("my_profile");
function->Call(env->Global(), 0, NULL);
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
- CHECK_NE(NULL, profile);
+ CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
reinterpret_cast<i::CpuProfile*>(profile)->Print();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
CheckChildrenNames(root, names);
}
@@ -1438,25 +1402,20 @@ TEST(JsNativeJsSample) {
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
env->GetIsolate(), CallJsFunction);
v8::Local<v8::Function> func = func_template->GetFunction();
- func->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"));
- env->Global()->Set(
- v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"), func);
+ func->SetName(v8_str("CallJsFunction"));
+ env->Global()->Set(v8_str("CallJsFunction"), func);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- js_native_js_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(js_native_js_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
CheckChildrenNames(root, names);
}
@@ -1507,25 +1466,19 @@ TEST(JsNativeJsRuntimeJsSample) {
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
env->GetIsolate(), CallJsFunction);
v8::Local<v8::Function> func = func_template->GetFunction();
- func->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"));
- env->Global()->Set(
- v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction"), func);
+ func->SetName(v8_str("CallJsFunction"));
+ env->Global()->Set(v8_str("CallJsFunction"), func);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(),
- js_native_js_runtime_js_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(js_native_js_runtime_js_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
CheckChildrenNames(root, names);
const v8::CpuProfileNode* startNode =
@@ -1587,31 +1540,24 @@ TEST(JsNative1JsNative2JsSample) {
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
env->GetIsolate(), CallJsFunction);
v8::Local<v8::Function> func1 = func_template->GetFunction();
- func1->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction1"));
- env->Global()->Set(
- v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction1"), func1);
+ func1->SetName(v8_str("CallJsFunction1"));
+ env->Global()->Set(v8_str("CallJsFunction1"), func1);
v8::Local<v8::Function> func2 = v8::FunctionTemplate::New(
env->GetIsolate(), CallJsFunction2)->GetFunction();
- func2->SetName(v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction2"));
- env->Global()->Set(
- v8::String::NewFromUtf8(env->GetIsolate(), "CallJsFunction2"), func2);
+ func2->SetName(v8_str("CallJsFunction2"));
+ env->Global()->Set(v8_str("CallJsFunction2"), func2);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(),
- js_native1_js_native2_js_test_source))->Run();
- v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+ CompileRun(js_native1_js_native2_js_test_source);
+ v8::Local<v8::Function> function = GetFunction(*env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str("start");
CheckChildrenNames(root, names);
const v8::CpuProfileNode* startNode =
@@ -1644,8 +1590,7 @@ TEST(IdleTime) {
v8::HandleScope scope(env->GetIsolate());
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- v8::Local<v8::String> profile_name =
- v8::String::NewFromUtf8(env->GetIsolate(), "my_profile");
+ v8::Local<v8::String> profile_name = v8_str("my_profile");
cpu_profiler->StartProfiling(profile_name);
i::Isolate* isolate = CcTest::i_isolate();
@@ -1663,29 +1608,26 @@ TEST(IdleTime) {
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
- CHECK_NE(NULL, profile);
+ CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
reinterpret_cast<i::CpuProfile*>(profile)->Print();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
- names[0] = v8::String::NewFromUtf8(
- env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kProgramEntryName);
- names[2] = v8::String::NewFromUtf8(env->GetIsolate(),
- ProfileGenerator::kIdleEntryName);
+ names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8_str(ProfileGenerator::kProgramEntryName);
+ names[2] = v8_str(ProfileGenerator::kIdleEntryName);
CheckChildrenNames(root, names);
const v8::CpuProfileNode* programNode =
GetChild(env->GetIsolate(), root, ProfileGenerator::kProgramEntryName);
CHECK_EQ(0, programNode->GetChildrenCount());
- CHECK_GE(programNode->GetHitCount(), 3);
+ CHECK_GE(programNode->GetHitCount(), 3u);
const v8::CpuProfileNode* idleNode =
GetChild(env->GetIsolate(), root, ProfileGenerator::kIdleEntryName);
CHECK_EQ(0, idleNode->GetChildrenCount());
- CHECK_GE(idleNode->GetHitCount(), 3);
+ CHECK_GE(idleNode->GetHitCount(), 3u);
profile->Delete();
}
@@ -1695,10 +1637,8 @@ static void CheckFunctionDetails(v8::Isolate* isolate,
const v8::CpuProfileNode* node,
const char* name, const char* script_name,
int script_id, int line, int column) {
- CHECK_EQ(v8::String::NewFromUtf8(isolate, name),
- node->GetFunctionName());
- CHECK_EQ(v8::String::NewFromUtf8(isolate, script_name),
- node->GetScriptResourceName());
+ CHECK(v8_str(name)->Equals(node->GetFunctionName()));
+ CHECK(v8_str(script_name)->Equals(node->GetScriptResourceName()));
CHECK_EQ(script_id, node->GetScriptId());
CHECK_EQ(line, node->GetLineNumber());
CHECK_EQ(column, node->GetColumnNumber());
@@ -1751,17 +1691,16 @@ TEST(DontStopOnFinishedProfileDelete) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Isolate* isolate = env->GetIsolate();
v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
CHECK_EQ(0, iprofiler->GetProfilesCount());
- v8::Handle<v8::String> outer = v8::String::NewFromUtf8(isolate, "outer");
+ v8::Handle<v8::String> outer = v8_str("outer");
profiler->StartProfiling(outer);
CHECK_EQ(0, iprofiler->GetProfilesCount());
- v8::Handle<v8::String> inner = v8::String::NewFromUtf8(isolate, "inner");
+ v8::Handle<v8::String> inner = v8_str("inner");
profiler->StartProfiling(inner);
CHECK_EQ(0, iprofiler->GetProfilesCount());
@@ -1779,3 +1718,81 @@ TEST(DontStopOnFinishedProfileDelete) {
outer_profile = NULL;
CHECK_EQ(0, iprofiler->GetProfilesCount());
}
+
+
+static const char* collect_deopt_events_test_source =
+ "function opt_function(left, right, depth) {\n"
+ " if (depth) return opt_function(left, right, depth - 1);\n"
+ "\n"
+ " var k = left / 10;\n"
+ " var r = 10 / right;\n"
+ " return k + r;"
+ "}\n"
+ "\n"
+ "function test(left, right) {\n"
+ " return opt_function(left, right, 1);\n"
+ "}\n"
+ "\n"
+ "startProfiling();\n"
+ "\n"
+ "test(10, 10);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(opt_function)\n"
+ "\n"
+ "test(10, 10);\n"
+ "\n"
+ "test(undefined, 10);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(opt_function)\n"
+ "\n"
+ "test(10, 10);\n"
+ "\n"
+ "test(10, 0);\n"
+ "\n"
+ "stopProfiling();\n"
+ "\n";
+
+
+TEST(CollectDeoptEvents) {
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+
+ v8::Script::Compile(v8_str(collect_deopt_events_test_source))->Run();
+ i::CpuProfile* iprofile = iprofiler->GetProfile(0);
+ iprofile->Print();
+ v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
+ const char* branch[] = {"", "test", "opt_function", "opt_function"};
+ const v8::CpuProfileNode* opt_function = GetSimpleBranch(
+ env->GetIsolate(), profile->GetTopDownRoot(), branch, arraysize(branch));
+ CHECK(opt_function);
+ const i::ProfileNode* iopt_function =
+ reinterpret_cast<const i::ProfileNode*>(opt_function);
+ CHECK_EQ(2, iopt_function->deopt_infos().length());
+ CHECK_EQ(i::Deoptimizer::GetDeoptReason(i::Deoptimizer::kNotAHeapNumber),
+ iopt_function->deopt_infos()[0].deopt_reason);
+ CHECK_EQ(i::Deoptimizer::GetDeoptReason(i::Deoptimizer::kDivisionByZero),
+ iopt_function->deopt_infos()[1].deopt_reason);
+ iprofiler->DeleteProfile(iprofile);
+}
+
+
+TEST(SourceLocation) {
+ i::FLAG_always_opt = true;
+ i::FLAG_hydrogen_track_positions = true;
+ LocalContext env;
+ v8::HandleScope scope(CcTest::isolate());
+
+ const char* source =
+ "function CompareStatementWithThis() {\n"
+ " if (this === 1) {}\n"
+ "}\n"
+ "CompareStatementWithThis();\n";
+
+ v8::Script::Compile(v8_str(source))->Run();
+}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index c3c65fd2f0..a4a993ad30 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -394,7 +394,7 @@ void CheckDebuggerUnloaded(bool check_functions) {
// Check that the debugger context is cleared and that there is no debug
// information stored for the debugger.
CHECK(CcTest::i_isolate()->debug()->debug_context().is_null());
- CHECK_EQ(NULL, CcTest::i_isolate()->debug()->debug_info_list_);
+ CHECK(!CcTest::i_isolate()->debug()->debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
@@ -2148,12 +2148,12 @@ TEST(ScriptBreakPointLine) {
// Call f and check that the script break point.
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
- CHECK_EQ("f", last_function_hit);
+ CHECK_EQ(0, strcmp("f", last_function_hit));
// Call g and check that the script break point.
g->Call(env->Global(), 0, NULL);
CHECK_EQ(3, break_point_hit_count);
- CHECK_EQ("g", last_function_hit);
+ CHECK_EQ(0, strcmp("g", last_function_hit));
// Clear the script break point on g and set one on h.
ClearBreakPointFromJS(env->GetIsolate(), sbp3);
@@ -2163,7 +2163,7 @@ TEST(ScriptBreakPointLine) {
// Call g and check that the script break point in h is hit.
g->Call(env->Global(), 0, NULL);
CHECK_EQ(4, break_point_hit_count);
- CHECK_EQ("h", last_function_hit);
+ CHECK_EQ(0, strcmp("h", last_function_hit));
// Clear break points in f and h. Set a new one in the script between
// functions f and g and test that there is no break points in f and g any
@@ -4242,7 +4242,7 @@ TEST(DebugBreak) {
}
// One break for each function called.
- CHECK_EQ(4 * arraysize(argv), break_point_hit_count);
+ CHECK(4 * arraysize(argv) == break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -5675,8 +5675,7 @@ TEST(CallFunctionInDebugger) {
// Calling a function through the debugger returns 0 frames if there are
// no JavaScript frames.
- CHECK_EQ(v8::Integer::New(isolate, 0),
- v8::Debug::Call(frame_count));
+ CHECK(v8::Integer::New(isolate, 0)->Equals(v8::Debug::Call(frame_count)));
// Test that the number of frames can be retrieved.
v8::Script::Compile(
@@ -5973,7 +5972,7 @@ TEST(ScriptNameAndData) {
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ("name", last_script_name_hit);
+ CHECK_EQ(0, strcmp("name", last_script_name_hit));
// Compile the same script again without setting data. As the compilation
// cache is disabled when debugging expect the data to be missing.
@@ -5982,7 +5981,7 @@ TEST(ScriptNameAndData) {
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
- CHECK_EQ("name", last_script_name_hit);
+ CHECK_EQ(0, strcmp("name", last_script_name_hit));
v8::Local<v8::String> data_obj_source = v8::String::NewFromUtf8(
env->GetIsolate(),
@@ -5999,7 +5998,7 @@ TEST(ScriptNameAndData) {
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(3, break_point_hit_count);
- CHECK_EQ("new name", last_script_name_hit);
+ CHECK_EQ(0, strcmp("new name", last_script_name_hit));
v8::Handle<v8::Script> script3 = v8::Script::Compile(script, &origin2);
script3->Run();
@@ -6204,7 +6203,7 @@ TEST(RegExpDebugBreak) {
// Check that there was only one break event. Matching RegExp should not
// cause Break events.
CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ("f", last_function_hit);
+ CHECK_EQ(0, strcmp("f", last_function_hit));
}
#endif // V8_INTERPRETED_REGEXP
@@ -6930,7 +6929,7 @@ TEST(DebugContextIsPreservedBetweenAccesses) {
static v8::Handle<v8::Value> expected_callback_data;
static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
CHECK(details.GetEventContext() == expected_context);
- CHECK_EQ(expected_callback_data, details.GetCallbackData());
+ CHECK(expected_callback_data->Equals(details.GetCallbackData()));
}
@@ -7442,7 +7441,7 @@ TEST(PrecompiledFunction) {
v8::Local<v8::Value> result = ParserCacheCompileRun(source);
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
- CHECK_EQ("bar", *utf8);
+ CHECK_EQ(0, strcmp("bar", *utf8));
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
diff --git a/deps/v8/test/cctest/test-declarative-accessors.cc b/deps/v8/test/cctest/test-declarative-accessors.cc
deleted file mode 100644
index 8d93245eb6..0000000000
--- a/deps/v8/test/cctest/test-declarative-accessors.cc
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-
-class HandleArray : public Malloced {
- public:
- static const unsigned kArraySize = 200;
- HandleArray() {}
- ~HandleArray() { Reset(); }
- void Reset() {
- for (unsigned i = 0; i < kArraySize; i++) {
- if (handles_[i].IsEmpty()) continue;
- handles_[i].Reset();
- }
- }
- v8::Persistent<v8::Value> handles_[kArraySize];
- private:
- DISALLOW_COPY_AND_ASSIGN(HandleArray);
-};
-
-
-// An aligned character array of size 1024.
-class AlignedArray : public Malloced {
- public:
- static const unsigned kArraySize = 1024/sizeof(uint64_t);
- AlignedArray() { Reset(); }
-
- void Reset() {
- for (unsigned i = 0; i < kArraySize; i++) {
- data_[i] = 0;
- }
- }
-
- template<typename T>
- T As() { return reinterpret_cast<T>(data_); }
-
- private:
- uint64_t data_[kArraySize];
- DISALLOW_COPY_AND_ASSIGN(AlignedArray);
-};
-
-
-class DescriptorTestHelper {
- public:
- DescriptorTestHelper() :
- isolate_(NULL), array_(new AlignedArray), handle_array_(new HandleArray) {
- v8::V8::Initialize();
- isolate_ = CcTest::isolate();
- }
- v8::Isolate* isolate_;
- // Data objects.
- SmartPointer<AlignedArray> array_;
- SmartPointer<HandleArray> handle_array_;
- private:
- DISALLOW_COPY_AND_ASSIGN(DescriptorTestHelper);
-};
-
-
-static v8::Local<v8::ObjectTemplate> CreateConstructor(
- v8::Handle<v8::Context> context,
- const char* class_name,
- int internal_field,
- const char* descriptor_name = NULL,
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor =
- v8::Handle<v8::DeclaredAccessorDescriptor>()) {
- v8::Local<v8::FunctionTemplate> constructor =
- v8::FunctionTemplate::New(context->GetIsolate());
- v8::Local<v8::ObjectTemplate> obj_template = constructor->InstanceTemplate();
- // Setup object template.
- if (descriptor_name != NULL && !descriptor.IsEmpty()) {
- bool added_accessor =
- obj_template->SetDeclaredAccessor(v8_str(descriptor_name), descriptor);
- CHECK(added_accessor);
- }
- obj_template->SetInternalFieldCount((internal_field+1)*2 + 7);
- context->Global()->Set(v8_str(class_name), constructor->GetFunction());
- return obj_template;
-}
-
-
-static void VerifyRead(v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
- int internal_field,
- void* internal_object,
- v8::Handle<v8::Value> expected_value) {
- LocalContext local_context;
- v8::HandleScope scope(local_context->GetIsolate());
- v8::Handle<v8::Context> context = local_context.local();
- CreateConstructor(context, "Accessible", internal_field, "x", descriptor);
- // Setup object.
- CompileRun("var accessible = new Accessible();");
- v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(
- context->Global()->Get(v8_str("accessible")));
- obj->SetAlignedPointerInInternalField(internal_field, internal_object);
- bool added_accessor;
- added_accessor = obj->SetDeclaredAccessor(v8_str("y"), descriptor);
- CHECK(added_accessor);
- added_accessor = obj->SetDeclaredAccessor(v8_str("13"), descriptor);
- CHECK(added_accessor);
- // Test access from template getter.
- v8::Local<v8::Value> value;
- value = CompileRun("accessible.x;");
- CHECK_EQ(expected_value, value);
- value = CompileRun("accessible['x'];");
- CHECK_EQ(expected_value, value);
- // Test access from object getter.
- value = CompileRun("accessible.y;");
- CHECK_EQ(expected_value, value);
- value = CompileRun("accessible['y'];");
- CHECK_EQ(expected_value, value);
- value = CompileRun("accessible[13];");
- CHECK_EQ(expected_value, value);
- value = CompileRun("accessible['13'];");
- CHECK_EQ(expected_value, value);
-}
-
-
-static v8::Handle<v8::Value> Convert(int32_t value, v8::Isolate* isolate) {
- return v8::Integer::New(isolate, value);
-}
-
-
-static v8::Handle<v8::Value> Convert(float value, v8::Isolate* isolate) {
- return v8::Number::New(isolate, value);
-}
-
-
-static v8::Handle<v8::Value> Convert(double value, v8::Isolate* isolate) {
- return v8::Number::New(isolate, value);
-}
-
-
-typedef v8::ObjectOperationDescriptor OOD;
-
-template<typename T>
-static void TestPrimitiveValue(
- T value,
- v8::DeclaredAccessorDescriptorDataType data_type,
- DescriptorTestHelper* helper) {
- v8::HandleScope handle_scope(helper->isolate_);
- int index = 17;
- int internal_field = 6;
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor =
- OOD::NewInternalFieldDereference(helper->isolate_, internal_field)
- ->NewRawShift(helper->isolate_, static_cast<uint16_t>(index*sizeof(T)))
- ->NewPrimitiveValue(helper->isolate_, data_type, 0);
- v8::Handle<v8::Value> expected = Convert(value, helper->isolate_);
- helper->array_->Reset();
- helper->array_->As<T*>()[index] = value;
- VerifyRead(descriptor, internal_field, helper->array_.get(), expected);
-}
-
-
-TEST(PrimitiveValueRead) {
- DescriptorTestHelper helper;
- TestPrimitiveValue<int32_t>(203, v8::kDescriptorInt32Type, &helper);
- TestPrimitiveValue<float>(23.7f, v8::kDescriptorFloatType, &helper);
- TestPrimitiveValue<double>(23.7, v8::kDescriptorDoubleType, &helper);
-}
-
-
-template<typename T>
-static void TestBitmaskCompare(T bitmask,
- T compare_value,
- DescriptorTestHelper* helper) {
- v8::HandleScope handle_scope(helper->isolate_);
- int index = 13;
- int internal_field = 4;
- v8::Handle<v8::RawOperationDescriptor> raw_descriptor =
- OOD::NewInternalFieldDereference(helper->isolate_, internal_field)
- ->NewRawShift(helper->isolate_, static_cast<uint16_t>(index*sizeof(T)));
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor;
- switch (sizeof(T)) {
- case 1:
- descriptor = raw_descriptor->NewBitmaskCompare8(
- helper->isolate_,
- static_cast<uint8_t>(bitmask),
- static_cast<uint8_t>(compare_value));
- break;
- case 2:
- descriptor = raw_descriptor->NewBitmaskCompare16(
- helper->isolate_,
- static_cast<uint16_t>(bitmask),
- static_cast<uint16_t>(compare_value));
- break;
- case 4:
- descriptor = raw_descriptor->NewBitmaskCompare32(
- helper->isolate_,
- static_cast<uint32_t>(bitmask),
- static_cast<uint32_t>(compare_value));
- break;
- default:
- CHECK(false);
- break;
- }
- AlignedArray* array = helper->array_.get();
- array->Reset();
- VerifyRead(descriptor, internal_field, array, v8::False(helper->isolate_));
- array->As<T*>()[index] = compare_value;
- VerifyRead(descriptor, internal_field, array, v8::True(helper->isolate_));
- helper->array_->As<T*>()[index] = compare_value & bitmask;
- VerifyRead(descriptor, internal_field, array, v8::True(helper->isolate_));
-}
-
-
-TEST(BitmaskCompareRead) {
- DescriptorTestHelper helper;
- TestBitmaskCompare<uint8_t>(0xf3, 0xa8, &helper);
- TestBitmaskCompare<uint16_t>(0xfefe, 0x7d42, &helper);
- TestBitmaskCompare<uint32_t>(0xfefeab18, 0x1234fdec, &helper);
-}
-
-
-TEST(PointerCompareRead) {
- DescriptorTestHelper helper;
- v8::HandleScope handle_scope(helper.isolate_);
- int index = 35;
- int internal_field = 3;
- void* ptr = helper.isolate_;
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor =
- OOD::NewInternalFieldDereference(helper.isolate_, internal_field)
- ->NewRawShift(helper.isolate_, static_cast<uint16_t>(index*sizeof(ptr)))
- ->NewPointerCompare(helper.isolate_, ptr);
- AlignedArray* array = helper.array_.get();
- VerifyRead(descriptor, internal_field, array, v8::False(helper.isolate_));
- array->As<uintptr_t*>()[index] = reinterpret_cast<uintptr_t>(ptr);
- VerifyRead(descriptor, internal_field, array, v8::True(helper.isolate_));
-}
-
-
-TEST(PointerDereferenceRead) {
- DescriptorTestHelper helper;
- v8::HandleScope handle_scope(helper.isolate_);
- int first_index = 13;
- int internal_field = 7;
- int second_index = 11;
- int pointed_to_index = 75;
- uint16_t expected = 0x1425;
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor =
- OOD::NewInternalFieldDereference(helper.isolate_, internal_field)
- ->NewRawShift(helper.isolate_, first_index*kPointerSize)
- ->NewRawDereference(helper.isolate_)
- ->NewRawShift(helper.isolate_,
- static_cast<uint16_t>(second_index*sizeof(int16_t)))
- ->NewPrimitiveValue(helper.isolate_, v8::kDescriptorInt16Type, 0);
- AlignedArray* array = helper.array_.get();
- array->As<uintptr_t**>()[first_index] =
- &array->As<uintptr_t*>()[pointed_to_index];
- VerifyRead(descriptor, internal_field, array,
- v8::Integer::New(helper.isolate_, 0));
- second_index += pointed_to_index*sizeof(uintptr_t)/sizeof(uint16_t);
- array->As<uint16_t*>()[second_index] = expected;
- VerifyRead(descriptor, internal_field, array,
- v8::Integer::New(helper.isolate_, expected));
-}
-
-
-TEST(HandleDereferenceRead) {
- DescriptorTestHelper helper;
- v8::HandleScope handle_scope(helper.isolate_);
- int index = 13;
- int internal_field = 0;
- v8::Handle<v8::DeclaredAccessorDescriptor> descriptor =
- OOD::NewInternalFieldDereference(helper.isolate_, internal_field)
- ->NewRawShift(helper.isolate_, index*kPointerSize)
- ->NewHandleDereference(helper.isolate_);
- HandleArray* array = helper.handle_array_.get();
- v8::Handle<v8::String> expected = v8_str("whatever");
- array->handles_[index].Reset(helper.isolate_, expected);
- VerifyRead(descriptor, internal_field, array, expected);
-}
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 06afdd2be7..5d487bb7da 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -159,13 +159,13 @@ void DeclarationContext::Check(const char* source,
if (expectations == EXPECT_RESULT) {
CHECK(!catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK_EQ(value, result);
+ CHECK(value->Equals(result));
}
} else {
CHECK(expectations == EXPECT_EXCEPTION);
CHECK(catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK_EQ(value, catcher.Exception());
+ CHECK(value->Equals(catcher.Exception()));
}
}
// Clean slate for the next test.
@@ -580,13 +580,13 @@ class SimpleContext {
if (expectations == EXPECT_RESULT) {
CHECK(!catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK_EQ(value, result);
+ CHECK(value->Equals(result));
}
} else {
CHECK(expectations == EXPECT_EXCEPTION);
CHECK(catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK_EQ(value, catcher.Exception());
+ CHECK(value->Equals(catcher.Exception()));
}
}
}
@@ -676,7 +676,6 @@ TEST(CrossScriptReferences_Simple2) {
TEST(CrossScriptReferencesHarmony) {
i::FLAG_harmony_scoping = true;
- i::FLAG_harmony_modules = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
@@ -687,7 +686,6 @@ TEST(CrossScriptReferencesHarmony) {
"'use strict'; function x() { return 1 }; x()", "x()",
"'use strict'; let x = 1; x", "x",
"'use strict'; const x = 1; x", "x",
- "'use strict'; module x { export let a = 1 }; x.a", "x.a",
NULL
};
@@ -823,7 +821,6 @@ TEST(CrossScriptReferencesHarmony) {
TEST(GlobalLexicalOSR) {
i::FLAG_use_strict = true;
i::FLAG_harmony_scoping = true;
- i::FLAG_harmony_modules = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
@@ -848,7 +845,6 @@ TEST(GlobalLexicalOSR) {
TEST(CrossScriptConflicts) {
i::FLAG_use_strict = true;
i::FLAG_harmony_scoping = true;
- i::FLAG_harmony_modules = true;
HandleScope scope(CcTest::isolate());
@@ -857,7 +853,6 @@ TEST(CrossScriptConflicts) {
"function x() { return 1 }; x()",
"let x = 1; x",
"const x = 1; x",
- "module x { export let a = 1 }; x.a",
NULL
};
const char* seconds[] = {
@@ -865,7 +860,6 @@ TEST(CrossScriptConflicts) {
"function x() { return 2 }; x()",
"let x = 2; x",
"const x = 2; x",
- "module x { export let a = 2 }; x.a",
NULL
};
@@ -1151,3 +1145,21 @@ TEST(CrossScriptAssignmentToConst) {
context.Check("x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
}
}
+
+
+TEST(Regress425510) {
+ i::FLAG_harmony_scoping = true;
+ i::FLAG_allow_natives_syntax = true;
+
+ HandleScope handle_scope(CcTest::isolate());
+
+ {
+ SimpleContext context;
+
+ context.Check("'use strict'; o; const o = 10", EXPECT_EXCEPTION);
+
+ for (int i = 0; i < 100; i++) {
+ context.Check("o.prototype", EXPECT_EXCEPTION);
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index a201ccd7e4..e4c8e03314 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -395,7 +395,7 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationADDString) {
v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
- CHECK_EQ("a+an X", *utf8);
+ CHECK_EQ(0, strcmp("a+an X", *utf8));
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
diff --git a/deps/v8/test/cctest/test-disasm-ppc.cc b/deps/v8/test/cctest/test-disasm-ppc.cc
new file mode 100644
index 0000000000..87b9ade055
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-ppc.cc
@@ -0,0 +1,155 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ EmbeddedVector<char, 128> disasm_buffer;
+
+ disasm.InstructionDecode(disasm_buffer, pc);
+
+ if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ fprintf(stderr,
+ "expected: \n"
+ "%s\n"
+ "disassembled: \n"
+ "%s\n\n",
+ compare_string, disasm_buffer.start());
+ return false;
+ }
+ return true;
+}
+
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(isolate, buffer, 4 * 1024); \
+ bool failure = false;
+
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte* progcounter = &buffer[pc_offset]; \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+ }
+
+// Force emission of any pending literals into a pool.
+#define EMIT_PENDING_LITERALS() assm.CheckConstPool(true, false)
+
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN() \
+ if (failure) { \
+ V8_Fatal(__FILE__, __LINE__, "PPC Disassembler tests failed.\n"); \
+ }
+
+TEST(DisasmPPC) {
+ SET_UP();
+
+ COMPARE(addc(r9, r7, r9), "7d274814 addc r9, r7, r9");
+ COMPARE(addic(r3, r5, Operand(20)), "30650014 addic r3, r5, 20");
+ COMPARE(addi(r0, ip, Operand(63)), "380c003f addi r0, r12, 63");
+ COMPARE(add(r5, r7, r0), "7ca70214 add r5, r7, r0");
+ COMPARE(addze(r0, r0, LeaveOE, SetRC), "7c000195 addze. r0, r0");
+ COMPARE(andi(r0, r3, Operand(4)), "70600004 andi. r0, r3, 4");
+ COMPARE(and_(r3, r6, r5), "7cc32838 and r3, r6, r5");
+ COMPARE(and_(r6, r0, r6, SetRC), "7c063039 and. r6, r0, r6");
+ // skipping branches (for now?)
+ COMPARE(bctr(), "4e800420 bctr");
+ COMPARE(blr(), "4e800020 blr");
+ COMPARE(bclr(BA, SetLK), "4e800021 blrl");
+// skipping call - only used in simulator
+#if V8_TARGET_ARCH_PPC64
+ COMPARE(cmpi(r0, Operand(5)), "2fa00005 cmpi r0, 5");
+#else
+ COMPARE(cmpi(r0, Operand(5)), "2f800005 cmpi r0, 5");
+#endif
+#if V8_TARGET_ARCH_PPC64
+ COMPARE(cmpl(r6, r7), "7fa63840 cmpl r6, r7");
+#else
+ COMPARE(cmpl(r6, r7), "7f863840 cmpl r6, r7");
+#endif
+#if V8_TARGET_ARCH_PPC64
+ COMPARE(cmp(r5, r11), "7fa55800 cmp r5, r11");
+#else
+ COMPARE(cmp(r5, r11), "7f855800 cmp r5, r11");
+#endif
+ // skipping crxor - incomplete disassembly
+ COMPARE(lbz(r4, MemOperand(r4, 7)), "88840007 lbz r4, 7(r4)");
+ COMPARE(lfd(d0, MemOperand(sp, 128)), "c8010080 lfd d0, 128(sp)");
+ COMPARE(li(r0, Operand(16)), "38000010 li r0, 16");
+ COMPARE(lis(r8, Operand(22560)), "3d005820 lis r8, 22560");
+ COMPARE(lwz(ip, MemOperand(r19, 44)), "8193002c lwz r12, 44(r19)");
+ COMPARE(lwzx(r0, MemOperand(r5, ip)), "7c05602e lwzx r0, r5, r12");
+ COMPARE(mflr(r0), "7c0802a6 mflr r0");
+ COMPARE(mr(r15, r4), "7c8f2378 mr r15, r4");
+ COMPARE(mtctr(r0), "7c0903a6 mtctr r0");
+ COMPARE(mtlr(r15), "7de803a6 mtlr r15");
+ COMPARE(ori(r8, r8, Operand(42849)), "6108a761 ori r8, r8, 42849");
+ COMPARE(orx(r5, r3, r4), "7c652378 or r5, r3, r4");
+ COMPARE(rlwinm(r4, r3, 2, 0, 29), "5464103a rlwinm r4, r3, 2, 0, 29");
+ COMPARE(rlwinm(r0, r3, 0, 31, 31, SetRC),
+ "546007ff rlwinm. r0, r3, 0, 31, 31");
+ COMPARE(srawi(r3, r6, 1), "7cc30e70 srawi r3,r6,1");
+ COMPARE(stb(r5, MemOperand(r11, 11)), "98ab000b stb r5, 11(r11)");
+ COMPARE(stfd(d2, MemOperand(sp, 8)), "d8410008 stfd d2, 8(sp)");
+ COMPARE(stw(r16, MemOperand(sp, 64)), "92010040 stw r16, 64(sp)");
+ COMPARE(stwu(r3, MemOperand(sp, -4)), "9461fffc stwu r3, -4(sp)");
+ COMPARE(sub(r3, r3, r4), "7c641850 subf r3, r4, r3");
+ COMPARE(sub(r0, r9, r8, LeaveOE, SetRC), "7c084851 subf. r0, r8, r9");
+ COMPARE(xor_(r6, r5, r4), "7ca62278 xor r6, r5, r4");
+
+ VERIFY_RUN();
+}
diff --git a/deps/v8/test/cctest/test-double.cc b/deps/v8/test/cctest/test-double.cc
index 16dcb37101..cf9fbbd1d0 100644
--- a/deps/v8/test/cctest/test-double.cc
+++ b/deps/v8/test/cctest/test-double.cc
@@ -105,7 +105,7 @@ TEST(IsDenormal) {
TEST(IsSpecial) {
CHECK(Double(V8_INFINITY).IsSpecial());
CHECK(Double(-V8_INFINITY).IsSpecial());
- CHECK(Double(v8::base::OS::nan_value()).IsSpecial());
+ CHECK(Double(std::numeric_limits<double>::quiet_NaN()).IsSpecial());
uint64_t bits = V8_2PART_UINT64_C(0xFFF12345, 00000000);
CHECK(Double(bits).IsSpecial());
// Denormals are not special:
@@ -128,7 +128,7 @@ TEST(IsSpecial) {
TEST(IsInfinite) {
CHECK(Double(V8_INFINITY).IsInfinite());
CHECK(Double(-V8_INFINITY).IsInfinite());
- CHECK(!Double(v8::base::OS::nan_value()).IsInfinite());
+ CHECK(!Double(std::numeric_limits<double>::quiet_NaN()).IsInfinite());
CHECK(!Double(0.0).IsInfinite());
CHECK(!Double(-0.0).IsInfinite());
CHECK(!Double(1.0).IsInfinite());
diff --git a/deps/v8/test/cctest/test-dtoa.cc b/deps/v8/test/cctest/test-dtoa.cc
index 3f396a5d1b..52a354ffa8 100644
--- a/deps/v8/test/cctest/test-dtoa.cc
+++ b/deps/v8/test/cctest/test-dtoa.cc
@@ -64,87 +64,87 @@ TEST(DtoaVariousDoubles) {
int sign;
DoubleToAscii(0.0, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("0", buffer.start());
+ CHECK_EQ(0, strcmp("0", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(0.0, DTOA_FIXED, 2, buffer, &sign, &length, &point);
CHECK_EQ(1, length);
- CHECK_EQ("0", buffer.start());
+ CHECK_EQ(0, strcmp("0", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(0.0, DTOA_PRECISION, 3, buffer, &sign, &length, &point);
CHECK_EQ(1, length);
- CHECK_EQ("0", buffer.start());
+ CHECK_EQ(0, strcmp("0", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(1.0, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(1.0, DTOA_FIXED, 3, buffer, &sign, &length, &point);
CHECK_GE(3, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(1.0, DTOA_PRECISION, 3, buffer, &sign, &length, &point);
CHECK_GE(3, length);
TrimRepresentation(buffer);
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(1.5, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(1.5, DTOA_FIXED, 10, buffer, &sign, &length, &point);
CHECK_GE(10, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
DoubleToAscii(1.5, DTOA_PRECISION, 10, buffer, &sign, &length, &point);
CHECK_GE(10, length);
TrimRepresentation(buffer);
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
double min_double = 5e-324;
DoubleToAscii(min_double, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("5", buffer.start());
+ CHECK_EQ(0, strcmp("5", buffer.start()));
CHECK_EQ(-323, point);
DoubleToAscii(min_double, DTOA_FIXED, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_GE(-5, point);
DoubleToAscii(min_double, DTOA_PRECISION, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length);
TrimRepresentation(buffer);
- CHECK_EQ("49407", buffer.start());
+ CHECK_EQ(0, strcmp("49407", buffer.start()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
DoubleToAscii(max_double, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("17976931348623157", buffer.start());
+ CHECK_EQ(0, strcmp("17976931348623157", buffer.start()));
CHECK_EQ(309, point);
DoubleToAscii(max_double, DTOA_PRECISION, 7, buffer, &sign, &length, &point);
CHECK_GE(7, length);
TrimRepresentation(buffer);
- CHECK_EQ("1797693", buffer.start());
+ CHECK_EQ(0, strcmp("1797693", buffer.start()));
CHECK_EQ(309, point);
DoubleToAscii(4294967272.0, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("4294967272", buffer.start());
+ CHECK_EQ(0, strcmp("4294967272", buffer.start()));
CHECK_EQ(10, point);
DoubleToAscii(4294967272.0, DTOA_FIXED, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("4294967272", buffer.start());
+ CHECK_EQ(0, strcmp("4294967272", buffer.start()));
CHECK_EQ(10, point);
@@ -152,37 +152,37 @@ TEST(DtoaVariousDoubles) {
buffer, &sign, &length, &point);
CHECK_GE(14, length);
TrimRepresentation(buffer);
- CHECK_EQ("4294967272", buffer.start());
+ CHECK_EQ(0, strcmp("4294967272", buffer.start()));
CHECK_EQ(10, point);
DoubleToAscii(4.1855804968213567e298, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
- CHECK_EQ("4185580496821357", buffer.start());
+ CHECK_EQ(0, strcmp("4185580496821357", buffer.start()));
CHECK_EQ(299, point);
DoubleToAscii(4.1855804968213567e298, DTOA_PRECISION, 20,
buffer, &sign, &length, &point);
CHECK_GE(20, length);
TrimRepresentation(buffer);
- CHECK_EQ("41855804968213567225", buffer.start());
+ CHECK_EQ(0, strcmp("41855804968213567225", buffer.start()));
CHECK_EQ(299, point);
DoubleToAscii(5.5626846462680035e-309, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
- CHECK_EQ("5562684646268003", buffer.start());
+ CHECK_EQ(0, strcmp("5562684646268003", buffer.start()));
CHECK_EQ(-308, point);
DoubleToAscii(5.5626846462680035e-309, DTOA_PRECISION, 1,
buffer, &sign, &length, &point);
CHECK_GE(1, length);
TrimRepresentation(buffer);
- CHECK_EQ("6", buffer.start());
+ CHECK_EQ(0, strcmp("6", buffer.start()));
CHECK_EQ(-308, point);
DoubleToAscii(-2147483648.0, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
CHECK_EQ(1, sign);
- CHECK_EQ("2147483648", buffer.start());
+ CHECK_EQ(0, strcmp("2147483648", buffer.start()));
CHECK_EQ(10, point);
@@ -190,7 +190,7 @@ TEST(DtoaVariousDoubles) {
CHECK_GE(2, length - point);
TrimRepresentation(buffer);
CHECK_EQ(1, sign);
- CHECK_EQ("2147483648", buffer.start());
+ CHECK_EQ(0, strcmp("2147483648", buffer.start()));
CHECK_EQ(10, point);
DoubleToAscii(-2147483648.0, DTOA_PRECISION, 5,
@@ -198,13 +198,13 @@ TEST(DtoaVariousDoubles) {
CHECK_GE(5, length);
TrimRepresentation(buffer);
CHECK_EQ(1, sign);
- CHECK_EQ("21475", buffer.start());
+ CHECK_EQ(0, strcmp("21475", buffer.start()));
CHECK_EQ(10, point);
DoubleToAscii(-3.5844466002796428e+298, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
CHECK_EQ(1, sign);
- CHECK_EQ("35844466002796428", buffer.start());
+ CHECK_EQ(0, strcmp("35844466002796428", buffer.start()));
CHECK_EQ(299, point);
DoubleToAscii(-3.5844466002796428e+298, DTOA_PRECISION, 10,
@@ -212,54 +212,54 @@ TEST(DtoaVariousDoubles) {
CHECK_EQ(1, sign);
CHECK_GE(10, length);
TrimRepresentation(buffer);
- CHECK_EQ("35844466", buffer.start());
+ CHECK_EQ(0, strcmp("35844466", buffer.start()));
CHECK_EQ(299, point);
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("22250738585072014", buffer.start());
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
CHECK_EQ(-307, point);
DoubleToAscii(v, DTOA_PRECISION, 20, buffer, &sign, &length, &point);
CHECK_GE(20, length);
TrimRepresentation(buffer);
- CHECK_EQ("22250738585072013831", buffer.start());
+ CHECK_EQ(0, strcmp("22250738585072013831", buffer.start()));
CHECK_EQ(-307, point);
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("2225073858507201", buffer.start());
+ CHECK_EQ(0, strcmp("2225073858507201", buffer.start()));
CHECK_EQ(-307, point);
DoubleToAscii(v, DTOA_PRECISION, 20, buffer, &sign, &length, &point);
CHECK_GE(20, length);
TrimRepresentation(buffer);
- CHECK_EQ("2225073858507200889", buffer.start());
+ CHECK_EQ(0, strcmp("2225073858507200889", buffer.start()));
CHECK_EQ(-307, point);
DoubleToAscii(4128420500802942e-24, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
CHECK_EQ(0, sign);
- CHECK_EQ("4128420500802942", buffer.start());
+ CHECK_EQ(0, strcmp("4128420500802942", buffer.start()));
CHECK_EQ(-8, point);
v = -3.9292015898194142585311918e-10;
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ("39292015898194143", buffer.start());
+ CHECK_EQ(0, strcmp("39292015898194143", buffer.start()));
v = 4194304.0;
DoubleToAscii(v, DTOA_FIXED, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length - point);
TrimRepresentation(buffer);
- CHECK_EQ("4194304", buffer.start());
+ CHECK_EQ(0, strcmp("4194304", buffer.start()));
v = 3.3161339052167390562200598e-237;
DoubleToAscii(v, DTOA_PRECISION, 19, buffer, &sign, &length, &point);
CHECK_GE(19, length);
TrimRepresentation(buffer);
- CHECK_EQ("3316133905216739056", buffer.start());
+ CHECK_EQ(0, strcmp("3316133905216739056", buffer.start()));
CHECK_EQ(-236, point);
}
@@ -279,7 +279,7 @@ TEST(DtoaGayShortest) {
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
CHECK_EQ(0, sign); // All precomputed numbers are positive.
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
}
@@ -302,7 +302,7 @@ TEST(DtoaGayFixed) {
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length - point);
TrimRepresentation(buffer);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
}
@@ -326,6 +326,6 @@ TEST(DtoaGayPrecision) {
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length);
TrimRepresentation(buffer);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
}
diff --git a/deps/v8/test/cctest/test-fast-dtoa.cc b/deps/v8/test/cctest/test-fast-dtoa.cc
index 52198a45f2..0ed26b1690 100644
--- a/deps/v8/test/cctest/test-fast-dtoa.cc
+++ b/deps/v8/test/cctest/test-fast-dtoa.cc
@@ -64,44 +64,44 @@ TEST(FastDtoaShortestVariousDoubles) {
status = FastDtoa(min_double, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("5", buffer.start());
+ CHECK_EQ(0, strcmp("5", buffer.start()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = FastDtoa(max_double, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("17976931348623157", buffer.start());
+ CHECK_EQ(0, strcmp("17976931348623157", buffer.start()));
CHECK_EQ(309, point);
status = FastDtoa(4294967272.0, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("4294967272", buffer.start());
+ CHECK_EQ(0, strcmp("4294967272", buffer.start()));
CHECK_EQ(10, point);
status = FastDtoa(4.1855804968213567e298, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("4185580496821357", buffer.start());
+ CHECK_EQ(0, strcmp("4185580496821357", buffer.start()));
CHECK_EQ(299, point);
status = FastDtoa(5.5626846462680035e-309, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("5562684646268003", buffer.start());
+ CHECK_EQ(0, strcmp("5562684646268003", buffer.start()));
CHECK_EQ(-308, point);
status = FastDtoa(2147483648.0, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("2147483648", buffer.start());
+ CHECK_EQ(0, strcmp("2147483648", buffer.start()));
CHECK_EQ(10, point);
status = FastDtoa(3.5844466002796428e+298, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
if (status) { // Not all FastDtoa variants manage to compute this number.
- CHECK_EQ("35844466002796428", buffer.start());
+ CHECK_EQ(0, strcmp("35844466002796428", buffer.start()));
CHECK_EQ(299, point);
}
@@ -109,7 +109,7 @@ TEST(FastDtoaShortestVariousDoubles) {
double v = Double(smallest_normal64).value();
status = FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, &length, &point);
if (status) {
- CHECK_EQ("22250738585072014", buffer.start());
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
CHECK_EQ(-307, point);
}
@@ -117,7 +117,7 @@ TEST(FastDtoaShortestVariousDoubles) {
v = Double(largest_denormal64).value();
status = FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, &length, &point);
if (status) {
- CHECK_EQ("2225073858507201", buffer.start());
+ CHECK_EQ(0, strcmp("2225073858507201", buffer.start()));
CHECK_EQ(-307, point);
}
}
@@ -134,14 +134,14 @@ TEST(FastDtoaPrecisionVariousDoubles) {
CHECK(status);
CHECK_GE(3, length);
TrimRepresentation(buffer);
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
status = FastDtoa(1.5, FAST_DTOA_PRECISION, 10, buffer, &length, &point);
if (status) {
CHECK_GE(10, length);
TrimRepresentation(buffer);
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
}
@@ -149,14 +149,14 @@ TEST(FastDtoaPrecisionVariousDoubles) {
status = FastDtoa(min_double, FAST_DTOA_PRECISION, 5,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("49407", buffer.start());
+ CHECK_EQ(0, strcmp("49407", buffer.start()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = FastDtoa(max_double, FAST_DTOA_PRECISION, 7,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("1797693", buffer.start());
+ CHECK_EQ(0, strcmp("1797693", buffer.start()));
CHECK_EQ(309, point);
status = FastDtoa(4294967272.0, FAST_DTOA_PRECISION, 14,
@@ -164,26 +164,26 @@ TEST(FastDtoaPrecisionVariousDoubles) {
if (status) {
CHECK_GE(14, length);
TrimRepresentation(buffer);
- CHECK_EQ("4294967272", buffer.start());
+ CHECK_EQ(0, strcmp("4294967272", buffer.start()));
CHECK_EQ(10, point);
}
status = FastDtoa(4.1855804968213567e298, FAST_DTOA_PRECISION, 17,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("41855804968213567", buffer.start());
+ CHECK_EQ(0, strcmp("41855804968213567", buffer.start()));
CHECK_EQ(299, point);
status = FastDtoa(5.5626846462680035e-309, FAST_DTOA_PRECISION, 1,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("6", buffer.start());
+ CHECK_EQ(0, strcmp("6", buffer.start()));
CHECK_EQ(-308, point);
status = FastDtoa(2147483648.0, FAST_DTOA_PRECISION, 5,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ("21475", buffer.start());
+ CHECK_EQ(0, strcmp("21475", buffer.start()));
CHECK_EQ(10, point);
status = FastDtoa(3.5844466002796428e+298, FAST_DTOA_PRECISION, 10,
@@ -191,14 +191,14 @@ TEST(FastDtoaPrecisionVariousDoubles) {
CHECK(status);
CHECK_GE(10, length);
TrimRepresentation(buffer);
- CHECK_EQ("35844466", buffer.start());
+ CHECK_EQ(0, strcmp("35844466", buffer.start()));
CHECK_EQ(299, point);
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
status = FastDtoa(v, FAST_DTOA_PRECISION, 17, buffer, &length, &point);
CHECK(status);
- CHECK_EQ("22250738585072014", buffer.start());
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
CHECK_EQ(-307, point);
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
@@ -207,19 +207,19 @@ TEST(FastDtoaPrecisionVariousDoubles) {
CHECK(status);
CHECK_GE(20, length);
TrimRepresentation(buffer);
- CHECK_EQ("22250738585072009", buffer.start());
+ CHECK_EQ(0, strcmp("22250738585072009", buffer.start()));
CHECK_EQ(-307, point);
v = 3.3161339052167390562200598e-237;
status = FastDtoa(v, FAST_DTOA_PRECISION, 18, buffer, &length, &point);
CHECK(status);
- CHECK_EQ("331613390521673906", buffer.start());
+ CHECK_EQ(0, strcmp("331613390521673906", buffer.start()));
CHECK_EQ(-236, point);
v = 7.9885183916008099497815232e+191;
status = FastDtoa(v, FAST_DTOA_PRECISION, 4, buffer, &length, &point);
CHECK(status);
- CHECK_EQ("7989", buffer.start());
+ CHECK_EQ(0, strcmp("7989", buffer.start()));
CHECK_EQ(192, point);
}
@@ -246,7 +246,7 @@ TEST(FastDtoaGayShortest) {
if (length == kFastDtoaMaximalLength) needed_max_length = true;
succeeded++;
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
CHECK_GT(succeeded*1.0/total, 0.99);
CHECK(needed_max_length);
@@ -281,7 +281,7 @@ TEST(FastDtoaGayPrecision) {
if (number_digits <= 15) succeeded_15++;
TrimRepresentation(buffer);
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
// The precomputed numbers contain many entries with many requested
// digits. These have a high failure rate and we therefore expect a lower
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index fa2f195b64..89c475eab5 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -171,44 +171,43 @@ TEST(VectorICProfilerStatistics) {
Handle<JSFunction> f = v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
// There should be one IC.
- Code* code = f->shared()->code();
+ Handle<Code> code = handle(f->shared()->code(), isolate);
TypeFeedbackInfo* feedback_info =
TypeFeedbackInfo::cast(code->type_feedback_info());
CHECK_EQ(1, feedback_info->ic_total_count());
CHECK_EQ(0, feedback_info->ic_with_type_info_count());
CHECK_EQ(0, feedback_info->ic_generic_count());
- TypeFeedbackVector* feedback_vector = f->shared()->feedback_vector();
+ Handle<TypeFeedbackVector> feedback_vector =
+ handle(f->shared()->feedback_vector(), isolate);
+ int ic_slot = 0;
+ CallICNexus nexus(feedback_vector, FeedbackVectorICSlot(ic_slot));
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
// Now send the information generic.
CompileRun("f(Object);");
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(0, feedback_vector->ic_with_type_info_count());
CHECK_EQ(1, feedback_vector->ic_generic_count());
- // A collection will make the site uninitialized again.
+ // A collection will not affect the site.
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(0, feedback_vector->ic_with_type_info_count());
- CHECK_EQ(0, feedback_vector->ic_generic_count());
+ CHECK_EQ(1, feedback_vector->ic_generic_count());
// The Array function is special. A call to array remains monomorphic
// and isn't cleared by gc because an AllocationSite is being held.
+ // Clear the IC manually in order to test this case.
+ nexus.Clear(*code);
CompileRun("f(Array);");
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
- int ic_slot = 0;
- CHECK(
- feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
+
+ CHECK(nexus.GetFeedback()->IsAllocationSite());
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
- CHECK(
- feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
+ CHECK(nexus.GetFeedback()->IsAllocationSite());
}
@@ -233,20 +232,21 @@ TEST(VectorCallICStates) {
CallICNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
// CallIC doesn't return map feedback.
- CHECK_EQ(NULL, nexus.FindFirstMap());
+ CHECK(!nexus.FindFirstMap());
CompileRun("f(function() { return 16; })");
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
- // After a collection, state should be reset to UNINITIALIZED.
+ // After a collection, state should remain GENERIC.
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(UNINITIALIZED, nexus.StateFromFeedback());
+ CHECK_EQ(GENERIC, nexus.StateFromFeedback());
- // Array is special. It will remain monomorphic across gcs and it contains an
- // AllocationSite.
+ // A call to Array is special, it contains an AllocationSite as feedback.
+ // Clear the IC manually in order to test this case.
+ nexus.Clear(f->shared()->code());
CompileRun("f(Array)");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot))->IsAllocationSite());
+ CHECK(nexus.GetFeedback()->IsAllocationSite());
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
@@ -299,10 +299,68 @@ TEST(VectorLoadICStates) {
// Finally driven megamorphic.
CompileRun("f({ blarg: 3, gran: 3, torino: 10, foo: 2 })");
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
- CHECK_EQ(NULL, nexus.FindFirstMap());
+ CHECK(!nexus.FindFirstMap());
// After a collection, state should not be reset to PREMONOMORPHIC.
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
}
+
+
+TEST(VectorLoadICOnSmi) {
+ if (i::FLAG_always_opt || !i::FLAG_vector_ics) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ // Make sure function f has a call that uses a type feedback slot.
+ CompileRun(
+ "var o = { foo: 3 };"
+ "function f(a) { return a.foo; } f(o);");
+ Handle<JSFunction> f = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
+ // There should be one IC.
+ Handle<TypeFeedbackVector> feedback_vector =
+ Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ FeedbackVectorICSlot slot(0);
+ LoadICNexus nexus(feedback_vector, slot);
+ CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
+
+ CompileRun("f(34)");
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ // Verify that the monomorphic map is the one we expect.
+ Map* number_map = heap->heap_number_map();
+ CHECK_EQ(number_map, nexus.FindFirstMap());
+
+ // Now go polymorphic on o.
+ CompileRun("f(o)");
+ CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+
+ MapHandleList maps;
+ nexus.FindAllMaps(&maps);
+ CHECK_EQ(2, maps.length());
+
+ // One of the maps should be the o map.
+ Handle<JSObject> o = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Object>::Cast(CcTest::global()->Get(v8_str("o"))));
+ bool number_map_found = false;
+ bool o_map_found = false;
+ for (int i = 0; i < maps.length(); i++) {
+ Handle<Map> current = maps[i];
+ if (*current == number_map)
+ number_map_found = true;
+ else if (*current == o->map())
+ o_map_found = true;
+ }
+ CHECK(number_map_found && o_map_found);
+
+ // The degree of polymorphism doesn't change.
+ CompileRun("f(100)");
+ CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+ MapHandleList maps2;
+ nexus.FindAllMaps(&maps2);
+ CHECK_EQ(2, maps2.length());
+}
}
diff --git a/deps/v8/test/cctest/test-fixed-dtoa.cc b/deps/v8/test/cctest/test-fixed-dtoa.cc
index de40d09f1b..7c2cc2eb15 100644
--- a/deps/v8/test/cctest/test-fixed-dtoa.cc
+++ b/deps/v8/test/cctest/test-fixed-dtoa.cc
@@ -46,445 +46,445 @@ TEST(FastFixedVariousDoubles) {
int point;
CHECK(FastFixedDtoa(1.0, 1, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.0, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.0, 0, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0xFFFFFFFF, 5, buffer, &length, &point));
- CHECK_EQ("4294967295", buffer.start());
+ CHECK_EQ(0, strcmp("4294967295", buffer.start()));
CHECK_EQ(10, point);
CHECK(FastFixedDtoa(4294967296.0, 5, buffer, &length, &point));
- CHECK_EQ("4294967296", buffer.start());
+ CHECK_EQ(0, strcmp("4294967296", buffer.start()));
CHECK_EQ(10, point);
CHECK(FastFixedDtoa(1e21, 5, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
// CHECK_EQ(22, point);
CHECK_EQ(22, point);
CHECK(FastFixedDtoa(999999999999999868928.00, 2, buffer, &length, &point));
- CHECK_EQ("999999999999999868928", buffer.start());
+ CHECK_EQ(0, strcmp("999999999999999868928", buffer.start()));
CHECK_EQ(21, point);
CHECK(FastFixedDtoa(6.9999999999999989514240000e+21, 5, buffer,
&length, &point));
- CHECK_EQ("6999999999999998951424", buffer.start());
+ CHECK_EQ(0, strcmp("6999999999999998951424", buffer.start()));
CHECK_EQ(22, point);
CHECK(FastFixedDtoa(1.5, 5, buffer, &length, &point));
- CHECK_EQ("15", buffer.start());
+ CHECK_EQ(0, strcmp("15", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.55, 5, buffer, &length, &point));
- CHECK_EQ("155", buffer.start());
+ CHECK_EQ(0, strcmp("155", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.55, 1, buffer, &length, &point));
- CHECK_EQ("16", buffer.start());
+ CHECK_EQ(0, strcmp("16", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.00000001, 15, buffer, &length, &point));
- CHECK_EQ("100000001", buffer.start());
+ CHECK_EQ(0, strcmp("100000001", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.1, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.001, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.0001, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.000001, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.0000001, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.000000001, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.00000000001, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.000000000001, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.00000000000001, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.0000000000000001, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.00000000000000001, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.0000000000000000001, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.00000000000000000001, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.10000000004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01000000004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.00100000004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.00010000004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001000004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.00000100004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.00000010004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001004, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.00000000104, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001000004, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.0000000000100004, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.0000000000010004, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001004, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.0000000000000104, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001000004, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.000000000000000100004, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.000000000000000010004, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001004, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.000000000000000000104, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.000000000000000000014, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.10000000006, 10, buffer, &length, &point));
- CHECK_EQ("1000000001", buffer.start());
+ CHECK_EQ(0, strcmp("1000000001", buffer.start()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01000000006, 10, buffer, &length, &point));
- CHECK_EQ("100000001", buffer.start());
+ CHECK_EQ(0, strcmp("100000001", buffer.start()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.00100000006, 10, buffer, &length, &point));
- CHECK_EQ("10000001", buffer.start());
+ CHECK_EQ(0, strcmp("10000001", buffer.start()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.00010000006, 10, buffer, &length, &point));
- CHECK_EQ("1000001", buffer.start());
+ CHECK_EQ(0, strcmp("1000001", buffer.start()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001000006, 10, buffer, &length, &point));
- CHECK_EQ("100001", buffer.start());
+ CHECK_EQ(0, strcmp("100001", buffer.start()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.00000100006, 10, buffer, &length, &point));
- CHECK_EQ("10001", buffer.start());
+ CHECK_EQ(0, strcmp("10001", buffer.start()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.00000010006, 10, buffer, &length, &point));
- CHECK_EQ("1001", buffer.start());
+ CHECK_EQ(0, strcmp("1001", buffer.start()));
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001006, 10, buffer, &length, &point));
- CHECK_EQ("101", buffer.start());
+ CHECK_EQ(0, strcmp("101", buffer.start()));
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.00000000106, 10, buffer, &length, &point));
- CHECK_EQ("11", buffer.start());
+ CHECK_EQ(0, strcmp("11", buffer.start()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001000006, 15, buffer, &length, &point));
- CHECK_EQ("100001", buffer.start());
+ CHECK_EQ(0, strcmp("100001", buffer.start()));
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.0000000000100006, 15, buffer, &length, &point));
- CHECK_EQ("10001", buffer.start());
+ CHECK_EQ(0, strcmp("10001", buffer.start()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.0000000000010006, 15, buffer, &length, &point));
- CHECK_EQ("1001", buffer.start());
+ CHECK_EQ(0, strcmp("1001", buffer.start()));
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001006, 15, buffer, &length, &point));
- CHECK_EQ("101", buffer.start());
+ CHECK_EQ(0, strcmp("101", buffer.start()));
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.0000000000000106, 15, buffer, &length, &point));
- CHECK_EQ("11", buffer.start());
+ CHECK_EQ(0, strcmp("11", buffer.start()));
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001000006, 20, buffer, &length, &point));
- CHECK_EQ("100001", buffer.start());
+ CHECK_EQ(0, strcmp("100001", buffer.start()));
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.000000000000000100006, 20, buffer, &length, &point));
- CHECK_EQ("10001", buffer.start());
+ CHECK_EQ(0, strcmp("10001", buffer.start()));
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.000000000000000010006, 20, buffer, &length, &point));
- CHECK_EQ("1001", buffer.start());
+ CHECK_EQ(0, strcmp("1001", buffer.start()));
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001006, 20, buffer, &length, &point));
- CHECK_EQ("101", buffer.start());
+ CHECK_EQ(0, strcmp("101", buffer.start()));
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.000000000000000000106, 20, buffer, &length, &point));
- CHECK_EQ("11", buffer.start());
+ CHECK_EQ(0, strcmp("11", buffer.start()));
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.000000000000000000016, 20, buffer, &length, &point));
- CHECK_EQ("2", buffer.start());
+ CHECK_EQ(0, strcmp("2", buffer.start()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.6, 0, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.96, 1, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.996, 2, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9996, 3, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99996, 4, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999996, 5, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999996, 6, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999996, 7, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999996, 8, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999996, 9, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999999996, 10, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999999996, 11, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999999996, 12, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999999999996, 13, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999999999996, 14, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999999999996, 15, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.00999999999999996, 16, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.000999999999999996, 17, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.0000999999999999996, 18, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00000999999999999996, 19, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.000000999999999999996, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(323423.234234, 10, buffer, &length, &point));
- CHECK_EQ("323423234234", buffer.start());
+ CHECK_EQ(0, strcmp("323423234234", buffer.start()));
CHECK_EQ(6, point);
CHECK(FastFixedDtoa(12345678.901234, 4, buffer, &length, &point));
- CHECK_EQ("123456789012", buffer.start());
+ CHECK_EQ(0, strcmp("123456789012", buffer.start()));
CHECK_EQ(8, point);
CHECK(FastFixedDtoa(98765.432109, 5, buffer, &length, &point));
- CHECK_EQ("9876543211", buffer.start());
+ CHECK_EQ(0, strcmp("9876543211", buffer.start()));
CHECK_EQ(5, point);
CHECK(FastFixedDtoa(42, 20, buffer, &length, &point));
- CHECK_EQ("42", buffer.start());
+ CHECK_EQ(0, strcmp("42", buffer.start()));
CHECK_EQ(2, point);
CHECK(FastFixedDtoa(0.5, 0, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1e-23, 10, buffer, &length, &point));
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(1e-123, 2, buffer, &length, &point));
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(1e-123, 0, buffer, &length, &point));
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(1e-23, 20, buffer, &length, &point));
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(1e-21, 20, buffer, &length, &point));
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(1e-22, 20, buffer, &length, &point));
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(6e-21, 20, buffer, &length, &point));
- CHECK_EQ("1", buffer.start());
+ CHECK_EQ(0, strcmp("1", buffer.start()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(9.1193616301674545152000000e+19, 0,
buffer, &length, &point));
- CHECK_EQ("91193616301674545152", buffer.start());
+ CHECK_EQ(0, strcmp("91193616301674545152", buffer.start()));
CHECK_EQ(20, point);
CHECK(FastFixedDtoa(4.8184662102767651659096515e-04, 19,
buffer, &length, &point));
- CHECK_EQ("4818466210276765", buffer.start());
+ CHECK_EQ(0, strcmp("4818466210276765", buffer.start()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(1.9023164229540652612705182e-23, 8,
buffer, &length, &point));
- CHECK_EQ("", buffer.start());
+ CHECK_EQ(0, strcmp("", buffer.start()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(1000000000000000128.0, 0,
buffer, &length, &point));
- CHECK_EQ("1000000000000000128", buffer.start());
+ CHECK_EQ(0, strcmp("1000000000000000128", buffer.start()));
CHECK_EQ(19, point);
}
@@ -507,6 +507,6 @@ TEST(FastFixedDtoaGayFixed) {
CHECK(status);
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length - point);
- CHECK_EQ(current_test.representation, buffer.start());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
}
}
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 862b73adba..e423fdc7e0 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -255,6 +255,6 @@ TEST(FlagsRemoveIncomplete) {
CHECK_EQ(2, FlagList::SetFlagsFromCommandLine(&argc,
const_cast<char **>(argv),
true));
- CHECK_NE(NULL, argv[1]);
+ CHECK(argv[1]);
CHECK_EQ(argc, 2);
}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 7f3dafc063..24f9c73532 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -90,7 +90,7 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
// Verify inferred function name.
SmartArrayPointer<char> inferred_name =
shared_func_info->inferred_name()->ToCString();
- CHECK_EQ(ref_inferred_name, inferred_name.get());
+ CHECK_EQ(0, strcmp(ref_inferred_name, inferred_name.get()));
}
diff --git a/deps/v8/test/cctest/test-global-object.cc b/deps/v8/test/cctest/test-global-object.cc
index 0e2c9408c6..b0ed29daf1 100644
--- a/deps/v8/test/cctest/test-global-object.cc
+++ b/deps/v8/test/cctest/test-global-object.cc
@@ -47,5 +47,5 @@ TEST(StrictUndeclaredGlobalVariable) {
script->Run();
CHECK(try_catch.HasCaught());
v8::String::Utf8Value exception(try_catch.Exception());
- CHECK_EQ("ReferenceError: x is not defined", *exception);
+ CHECK_EQ(0, strcmp("ReferenceError: x is not defined", *exception));
}
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 692861cfe4..c8ae4f30e5 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -90,6 +90,14 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#elif V8_TARGET_ARCH_PPC
+ __ function_descriptor();
+ __ push(kRootRegister);
+ __ InitializeRootRegister();
+ __ li(r3, Operand(key));
+ __ GetNumberHash(r3, ip);
+ __ pop(kRootRegister);
+ __ blr();
#else
#error Unsupported architecture.
#endif
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index 1e94bed593..68a7e8911b 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -93,37 +93,37 @@ static uint32_t CollisionHash(uint32_t key) { return key & 0x3; }
void TestSet(IntKeyHash hash, int size) {
IntSet set(hash);
- CHECK_EQ(0, set.occupancy());
+ CHECK_EQ(0u, set.occupancy());
set.Insert(1);
set.Insert(2);
set.Insert(3);
- CHECK_EQ(3, set.occupancy());
+ CHECK_EQ(3u, set.occupancy());
set.Insert(2);
set.Insert(3);
- CHECK_EQ(3, set.occupancy());
+ CHECK_EQ(3u, set.occupancy());
CHECK(set.Present(1));
CHECK(set.Present(2));
CHECK(set.Present(3));
CHECK(!set.Present(4));
- CHECK_EQ(3, set.occupancy());
+ CHECK_EQ(3u, set.occupancy());
set.Remove(1);
CHECK(!set.Present(1));
CHECK(set.Present(2));
CHECK(set.Present(3));
- CHECK_EQ(2, set.occupancy());
+ CHECK_EQ(2u, set.occupancy());
set.Remove(3);
CHECK(!set.Present(1));
CHECK(set.Present(2));
CHECK(!set.Present(3));
- CHECK_EQ(1, set.occupancy());
+ CHECK_EQ(1u, set.occupancy());
set.Clear();
- CHECK_EQ(0, set.occupancy());
+ CHECK_EQ(0u, set.occupancy());
// Insert a long series of values.
const int start = 453;
@@ -167,7 +167,7 @@ void TestSet(IntKeyHash hash, int size) {
y = y * factor + offset;
}
}
- CHECK_EQ(0, set.occupancy());
+ CHECK_EQ(0u, set.occupancy());
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 94a5be47c1..5c9d2e69f0 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -191,12 +191,10 @@ TEST(HeapSnapshot) {
// Verify, that JS global object of env2 has '..2' properties.
const v8::HeapGraphNode* a2_node =
GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "a2");
- CHECK_NE(NULL, a2_node);
- CHECK_NE(
- NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_1"));
- CHECK_NE(
- NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_2"));
- CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c2"));
+ CHECK(a2_node);
+ CHECK(GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_1"));
+ CHECK(GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_2"));
+ CHECK(GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c2"));
NamedEntriesDetector det;
det.CheckAllReachables(const_cast<i::HeapEntry*>(
@@ -225,13 +223,13 @@ TEST(HeapSnapshotObjectSizes) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* x =
GetProperty(global, v8::HeapGraphEdge::kProperty, "x");
- CHECK_NE(NULL, x);
+ CHECK(x);
const v8::HeapGraphNode* x1 =
GetProperty(x, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, x1);
+ CHECK(x1);
const v8::HeapGraphNode* x2 =
GetProperty(x, v8::HeapGraphEdge::kProperty, "b");
- CHECK_NE(NULL, x2);
+ CHECK(x2);
// Test sizes.
CHECK_NE(0, static_cast<int>(x->GetShallowSize()));
@@ -255,13 +253,13 @@ TEST(BoundFunctionInSnapshot) {
const v8::HeapGraphNode* f =
GetProperty(global, v8::HeapGraphEdge::kProperty, "boundFunction");
CHECK(f);
- CHECK_EQ(v8::String::NewFromUtf8(env->GetIsolate(), "native_bind"),
- f->GetName());
+ CHECK(v8::String::NewFromUtf8(env->GetIsolate(), "native_bind")
+ ->Equals(f->GetName()));
const v8::HeapGraphNode* bindings =
GetProperty(f, v8::HeapGraphEdge::kInternal, "bindings");
- CHECK_NE(NULL, bindings);
+ CHECK(bindings);
CHECK_EQ(v8::HeapGraphNode::kArray, bindings->GetType());
- CHECK_EQ(4, bindings->GetChildrenCount());
+ CHECK_EQ(3, bindings->GetChildrenCount());
const v8::HeapGraphNode* bound_this = GetProperty(
f, v8::HeapGraphEdge::kShortcut, "bound_this");
@@ -298,7 +296,7 @@ TEST(HeapSnapshotEntryChildren) {
}
const v8::HeapGraphNode* a =
GetProperty(global, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, a);
+ CHECK(a);
for (int i = 0, count = a->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = a->GetChild(i);
CHECK_EQ(a, prop->GetFromNode());
@@ -323,35 +321,35 @@ TEST(HeapSnapshotCodeObjects) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* compiled =
GetProperty(global, v8::HeapGraphEdge::kProperty, "compiled");
- CHECK_NE(NULL, compiled);
+ CHECK(compiled);
CHECK_EQ(v8::HeapGraphNode::kClosure, compiled->GetType());
const v8::HeapGraphNode* lazy =
GetProperty(global, v8::HeapGraphEdge::kProperty, "lazy");
- CHECK_NE(NULL, lazy);
+ CHECK(lazy);
CHECK_EQ(v8::HeapGraphNode::kClosure, lazy->GetType());
const v8::HeapGraphNode* anonymous =
GetProperty(global, v8::HeapGraphEdge::kProperty, "anonymous");
- CHECK_NE(NULL, anonymous);
+ CHECK(anonymous);
CHECK_EQ(v8::HeapGraphNode::kClosure, anonymous->GetType());
v8::String::Utf8Value anonymous_name(anonymous->GetName());
- CHECK_EQ("", *anonymous_name);
+ CHECK_EQ(0, strcmp("", *anonymous_name));
// Find references to code.
const v8::HeapGraphNode* compiled_code =
GetProperty(compiled, v8::HeapGraphEdge::kInternal, "shared");
- CHECK_NE(NULL, compiled_code);
+ CHECK(compiled_code);
const v8::HeapGraphNode* lazy_code =
GetProperty(lazy, v8::HeapGraphEdge::kInternal, "shared");
- CHECK_NE(NULL, lazy_code);
+ CHECK(lazy_code);
// Check that there's no strong next_code_link. There might be a weak one
// but might be not, so we can't check that fact.
const v8::HeapGraphNode* code =
GetProperty(compiled_code, v8::HeapGraphEdge::kInternal, "code");
- CHECK_NE(NULL, code);
+ CHECK(code);
const v8::HeapGraphNode* next_code_link =
GetProperty(code, v8::HeapGraphEdge::kInternal, "code");
- CHECK_EQ(NULL, next_code_link);
+ CHECK(!next_code_link);
// Verify that non-compiled code doesn't contain references to "x"
// literal, while compiled code does. The scope info is stored in FixedArray
@@ -393,10 +391,10 @@ TEST(HeapSnapshotHeapNumbers) {
heap_profiler->TakeHeapSnapshot(v8_str("numbers"));
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_EQ(NULL, GetProperty(global, v8::HeapGraphEdge::kProperty, "a"));
+ CHECK(!GetProperty(global, v8::HeapGraphEdge::kProperty, "a"));
const v8::HeapGraphNode* b =
GetProperty(global, v8::HeapGraphEdge::kProperty, "b");
- CHECK_NE(NULL, b);
+ CHECK(b);
CHECK_EQ(v8::HeapGraphNode::kHeapNumber, b->GetType());
}
@@ -409,6 +407,10 @@ TEST(HeapSnapshotSlicedString) {
"parent_string = \"123456789.123456789.123456789.123456789.123456789."
"123456789.123456789.123456789.123456789.123456789."
"123456789.123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789.123456789."
"123456789.123456789.123456789.123456789.123456789.\";"
"child_string = parent_string.slice(100);");
const v8::HeapSnapshot* snapshot =
@@ -417,10 +419,10 @@ TEST(HeapSnapshotSlicedString) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* parent_string =
GetProperty(global, v8::HeapGraphEdge::kProperty, "parent_string");
- CHECK_NE(NULL, parent_string);
+ CHECK(parent_string);
const v8::HeapGraphNode* child_string =
GetProperty(global, v8::HeapGraphEdge::kProperty, "child_string");
- CHECK_NE(NULL, child_string);
+ CHECK(child_string);
CHECK_EQ(v8::HeapGraphNode::kSlicedString, child_string->GetType());
const v8::HeapGraphNode* parent =
GetProperty(child_string, v8::HeapGraphEdge::kInternal, "parent");
@@ -456,7 +458,7 @@ TEST(HeapSnapshotConsString) {
const v8::HeapGraphNode* string_node =
GetProperty(global_node, v8::HeapGraphEdge::kInternal, "0");
- CHECK_NE(NULL, string_node);
+ CHECK(string_node);
CHECK_EQ(v8::HeapGraphNode::kConsString, string_node->GetType());
const v8::HeapGraphNode* first_node =
@@ -483,13 +485,13 @@ TEST(HeapSnapshotSymbol) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* a =
GetProperty(global, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, a);
+ CHECK(a);
CHECK_EQ(a->GetType(), v8::HeapGraphNode::kSymbol);
- CHECK_EQ(v8_str("symbol"), a->GetName());
+ CHECK(v8_str("symbol")->Equals(a->GetName()));
const v8::HeapGraphNode* name =
GetProperty(a, v8::HeapGraphEdge::kInternal, "name");
- CHECK_NE(NULL, name);
- CHECK_EQ(v8_str("mySymbol"), name->GetName());
+ CHECK(name);
+ CHECK(v8_str("mySymbol")->Equals(name->GetName()));
}
@@ -508,19 +510,19 @@ TEST(HeapSnapshotWeakCollection) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* k =
GetProperty(global, v8::HeapGraphEdge::kProperty, "k");
- CHECK_NE(NULL, k);
+ CHECK(k);
const v8::HeapGraphNode* v =
GetProperty(global, v8::HeapGraphEdge::kProperty, "v");
- CHECK_NE(NULL, v);
+ CHECK(v);
const v8::HeapGraphNode* s =
GetProperty(global, v8::HeapGraphEdge::kProperty, "s");
- CHECK_NE(NULL, s);
+ CHECK(s);
const v8::HeapGraphNode* ws =
GetProperty(global, v8::HeapGraphEdge::kProperty, "ws");
- CHECK_NE(NULL, ws);
+ CHECK(ws);
CHECK_EQ(v8::HeapGraphNode::kObject, ws->GetType());
- CHECK_EQ(v8_str("WeakSet"), ws->GetName());
+ CHECK(v8_str("WeakSet")->Equals(ws->GetName()));
const v8::HeapGraphNode* ws_table =
GetProperty(ws, v8::HeapGraphEdge::kInternal, "table");
@@ -537,14 +539,14 @@ TEST(HeapSnapshotWeakCollection) {
CHECK_EQ(1, weak_entries);
const v8::HeapGraphNode* ws_s =
GetProperty(ws, v8::HeapGraphEdge::kProperty, "str");
- CHECK_NE(NULL, ws_s);
- CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(ws_s->GetId()));
+ CHECK(ws_s);
+ CHECK_EQ(s->GetId(), ws_s->GetId());
const v8::HeapGraphNode* wm =
GetProperty(global, v8::HeapGraphEdge::kProperty, "wm");
- CHECK_NE(NULL, wm);
+ CHECK(wm);
CHECK_EQ(v8::HeapGraphNode::kObject, wm->GetType());
- CHECK_EQ(v8_str("WeakMap"), wm->GetName());
+ CHECK(v8_str("WeakMap")->Equals(wm->GetName()));
const v8::HeapGraphNode* wm_table =
GetProperty(wm, v8::HeapGraphEdge::kInternal, "table");
@@ -562,8 +564,8 @@ TEST(HeapSnapshotWeakCollection) {
CHECK_EQ(2, weak_entries);
const v8::HeapGraphNode* wm_s =
GetProperty(wm, v8::HeapGraphEdge::kProperty, "str");
- CHECK_NE(NULL, wm_s);
- CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(wm_s->GetId()));
+ CHECK(wm_s);
+ CHECK_EQ(s->GetId(), wm_s->GetId());
}
@@ -582,19 +584,19 @@ TEST(HeapSnapshotCollection) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* k =
GetProperty(global, v8::HeapGraphEdge::kProperty, "k");
- CHECK_NE(NULL, k);
+ CHECK(k);
const v8::HeapGraphNode* v =
GetProperty(global, v8::HeapGraphEdge::kProperty, "v");
- CHECK_NE(NULL, v);
+ CHECK(v);
const v8::HeapGraphNode* s =
GetProperty(global, v8::HeapGraphEdge::kProperty, "s");
- CHECK_NE(NULL, s);
+ CHECK(s);
const v8::HeapGraphNode* set =
GetProperty(global, v8::HeapGraphEdge::kProperty, "set");
- CHECK_NE(NULL, set);
+ CHECK(set);
CHECK_EQ(v8::HeapGraphNode::kObject, set->GetType());
- CHECK_EQ(v8_str("Set"), set->GetName());
+ CHECK(v8_str("Set")->Equals(set->GetName()));
const v8::HeapGraphNode* set_table =
GetProperty(set, v8::HeapGraphEdge::kInternal, "table");
@@ -611,14 +613,14 @@ TEST(HeapSnapshotCollection) {
CHECK_EQ(2, entries);
const v8::HeapGraphNode* set_s =
GetProperty(set, v8::HeapGraphEdge::kProperty, "str");
- CHECK_NE(NULL, set_s);
- CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(set_s->GetId()));
+ CHECK(set_s);
+ CHECK_EQ(s->GetId(), set_s->GetId());
const v8::HeapGraphNode* map =
GetProperty(global, v8::HeapGraphEdge::kProperty, "map");
- CHECK_NE(NULL, map);
+ CHECK(map);
CHECK_EQ(v8::HeapGraphNode::kObject, map->GetType());
- CHECK_EQ(v8_str("Map"), map->GetName());
+ CHECK(v8_str("Map")->Equals(map->GetName()));
const v8::HeapGraphNode* map_table =
GetProperty(map, v8::HeapGraphEdge::kInternal, "table");
@@ -635,8 +637,8 @@ TEST(HeapSnapshotCollection) {
CHECK_EQ(2, entries);
const v8::HeapGraphNode* map_s =
GetProperty(map, v8::HeapGraphEdge::kProperty, "str");
- CHECK_NE(NULL, map_s);
- CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(map_s->GetId()));
+ CHECK(map_s);
+ CHECK_EQ(s->GetId(), map_s->GetId());
}
@@ -659,20 +661,12 @@ TEST(HeapSnapshotInternalReferences) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
// The first reference will not present, because it's a Smi.
- CHECK_EQ(NULL, GetProperty(global_node, v8::HeapGraphEdge::kInternal, "0"));
+ CHECK(!GetProperty(global_node, v8::HeapGraphEdge::kInternal, "0"));
// The second reference is to an object.
- CHECK_NE(NULL, GetProperty(global_node, v8::HeapGraphEdge::kInternal, "1"));
+ CHECK(GetProperty(global_node, v8::HeapGraphEdge::kInternal, "1"));
}
-// Trying to introduce a check helper for uint32_t causes many
-// overloading ambiguities, so it seems easier just to cast
-// them to a signed type.
-#define CHECK_EQ_SNAPSHOT_OBJECT_ID(a, b) \
- CHECK_EQ(static_cast<int32_t>(a), static_cast<int32_t>(b))
-#define CHECK_NE_SNAPSHOT_OBJECT_ID(a, b) \
- CHECK((a) != (b)) // NOLINT
-
TEST(HeapSnapshotAddressReuse) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -700,7 +694,7 @@ TEST(HeapSnapshotAddressReuse) {
const v8::HeapGraphNode* array_node =
GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, array_node);
+ CHECK(array_node);
int wrong_count = 0;
for (int i = 0, count = array_node->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = array_node->GetChild(i);
@@ -743,24 +737,24 @@ TEST(HeapEntryIdsAndArrayShift) {
const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
- CHECK_NE_SNAPSHOT_OBJECT_ID(0, global1->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(global1->GetId(), global2->GetId());
+ CHECK_NE(0u, global1->GetId());
+ CHECK_EQ(global1->GetId(), global2->GetId());
const v8::HeapGraphNode* a1 =
GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, a1);
+ CHECK(a1);
const v8::HeapGraphNode* k1 =
GetProperty(a1, v8::HeapGraphEdge::kInternal, "elements");
- CHECK_NE(NULL, k1);
+ CHECK(k1);
const v8::HeapGraphNode* a2 =
GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, a2);
+ CHECK(a2);
const v8::HeapGraphNode* k2 =
GetProperty(a2, v8::HeapGraphEdge::kInternal, "elements");
- CHECK_NE(NULL, k2);
+ CHECK(k2);
- CHECK_EQ_SNAPSHOT_OBJECT_ID(a1->GetId(), a2->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(k1->GetId(), k2->GetId());
+ CHECK_EQ(a1->GetId(), a2->GetId());
+ CHECK_EQ(k1->GetId(), k2->GetId());
}
@@ -786,46 +780,46 @@ TEST(HeapEntryIdsAndGC) {
heap_profiler->TakeHeapSnapshot(s2_str);
CHECK(ValidateSnapshot(snapshot2));
- CHECK_GT(snapshot1->GetMaxSnapshotJSObjectId(), 7000);
+ CHECK_GT(snapshot1->GetMaxSnapshotJSObjectId(), 7000u);
CHECK(snapshot1->GetMaxSnapshotJSObjectId() <=
snapshot2->GetMaxSnapshotJSObjectId());
const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
- CHECK_NE_SNAPSHOT_OBJECT_ID(0, global1->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(global1->GetId(), global2->GetId());
+ CHECK_NE(0u, global1->GetId());
+ CHECK_EQ(global1->GetId(), global2->GetId());
const v8::HeapGraphNode* A1 =
GetProperty(global1, v8::HeapGraphEdge::kProperty, "A");
- CHECK_NE(NULL, A1);
+ CHECK(A1);
const v8::HeapGraphNode* A2 =
GetProperty(global2, v8::HeapGraphEdge::kProperty, "A");
- CHECK_NE(NULL, A2);
- CHECK_NE_SNAPSHOT_OBJECT_ID(0, A1->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(A1->GetId(), A2->GetId());
+ CHECK(A2);
+ CHECK_NE(0u, A1->GetId());
+ CHECK_EQ(A1->GetId(), A2->GetId());
const v8::HeapGraphNode* B1 =
GetProperty(global1, v8::HeapGraphEdge::kProperty, "B");
- CHECK_NE(NULL, B1);
+ CHECK(B1);
const v8::HeapGraphNode* B2 =
GetProperty(global2, v8::HeapGraphEdge::kProperty, "B");
- CHECK_NE(NULL, B2);
- CHECK_NE_SNAPSHOT_OBJECT_ID(0, B1->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(B1->GetId(), B2->GetId());
+ CHECK(B2);
+ CHECK_NE(0u, B1->GetId());
+ CHECK_EQ(B1->GetId(), B2->GetId());
const v8::HeapGraphNode* a1 =
GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, a1);
+ CHECK(a1);
const v8::HeapGraphNode* a2 =
GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
- CHECK_NE(NULL, a2);
- CHECK_NE_SNAPSHOT_OBJECT_ID(0, a1->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(a1->GetId(), a2->GetId());
+ CHECK(a2);
+ CHECK_NE(0u, a1->GetId());
+ CHECK_EQ(a1->GetId(), a2->GetId());
const v8::HeapGraphNode* b1 =
GetProperty(global1, v8::HeapGraphEdge::kProperty, "b");
- CHECK_NE(NULL, b1);
+ CHECK(b1);
const v8::HeapGraphNode* b2 =
GetProperty(global2, v8::HeapGraphEdge::kProperty, "b");
- CHECK_NE(NULL, b2);
- CHECK_NE_SNAPSHOT_OBJECT_ID(0, b1->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(b1->GetId(), b2->GetId());
+ CHECK(b2);
+ CHECK_NE(0u, b1->GetId());
+ CHECK_EQ(b1->GetId(), b2->GetId());
}
@@ -993,8 +987,8 @@ TEST(HeapSnapshotJSONSerialization) {
v8::Local<v8::String> ref_string =
CompileRun(STRING_LITERAL_FOR_TEST)->ToString(isolate);
#undef STRING_LITERAL_FOR_TEST
- CHECK_EQ(*v8::String::Utf8Value(ref_string),
- *v8::String::Utf8Value(string));
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(ref_string),
+ *v8::String::Utf8Value(string)));
}
@@ -1100,14 +1094,14 @@ TEST(HeapSnapshotObjectsStats) {
&initial_id);
CHECK_EQ(1, stats_update.intervals_count());
CHECK_EQ(1, stats_update.updates_written());
- CHECK_LT(0, stats_update.entries_size());
+ CHECK_LT(0u, stats_update.entries_size());
CHECK_EQ(0, stats_update.first_interval_index());
}
// No data expected in update because nothing has happened.
v8::SnapshotObjectId same_id;
CHECK_EQ(0, GetHeapStatsUpdate(heap_profiler, &same_id).updates_written());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(initial_id, same_id);
+ CHECK_EQ(initial_id, same_id);
{
v8::SnapshotObjectId additional_string_id;
@@ -1120,15 +1114,15 @@ TEST(HeapSnapshotObjectsStats) {
CHECK_LT(same_id, additional_string_id);
CHECK_EQ(1, stats_update.intervals_count());
CHECK_EQ(1, stats_update.updates_written());
- CHECK_LT(0, stats_update.entries_size());
- CHECK_EQ(1, stats_update.entries_count());
+ CHECK_LT(0u, stats_update.entries_size());
+ CHECK_EQ(1u, stats_update.entries_count());
CHECK_EQ(2, stats_update.first_interval_index());
}
// No data expected in update because nothing happened.
v8::SnapshotObjectId last_id;
CHECK_EQ(0, GetHeapStatsUpdate(heap_profiler, &last_id).updates_written());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(additional_string_id, last_id);
+ CHECK_EQ(additional_string_id, last_id);
{
v8::HandleScope inner_scope_2(env->GetIsolate());
@@ -1145,8 +1139,8 @@ TEST(HeapSnapshotObjectsStats) {
TestStatsStream stats_update = GetHeapStatsUpdate(heap_profiler);
CHECK_EQ(1, stats_update.intervals_count());
CHECK_EQ(1, stats_update.updates_written());
- CHECK_LT(0, entries_size = stats_update.entries_size());
- CHECK_EQ(3, stats_update.entries_count());
+ CHECK_LT(0u, entries_size = stats_update.entries_size());
+ CHECK_EQ(3u, stats_update.entries_count());
CHECK_EQ(4, stats_update.first_interval_index());
}
}
@@ -1157,7 +1151,7 @@ TEST(HeapSnapshotObjectsStats) {
CHECK_EQ(1, stats_update.intervals_count());
CHECK_EQ(1, stats_update.updates_written());
CHECK_GT(entries_size, stats_update.entries_size());
- CHECK_EQ(1, stats_update.entries_count());
+ CHECK_EQ(1u, stats_update.entries_count());
// Two strings from forth interval were released.
CHECK_EQ(4, stats_update.first_interval_index());
}
@@ -1168,8 +1162,8 @@ TEST(HeapSnapshotObjectsStats) {
TestStatsStream stats_update = GetHeapStatsUpdate(heap_profiler);
CHECK_EQ(1, stats_update.intervals_count());
CHECK_EQ(1, stats_update.updates_written());
- CHECK_EQ(0, stats_update.entries_size());
- CHECK_EQ(0, stats_update.entries_count());
+ CHECK_EQ(0u, stats_update.entries_size());
+ CHECK_EQ(0u, stats_update.entries_count());
// The last string from forth interval was released.
CHECK_EQ(4, stats_update.first_interval_index());
}
@@ -1179,14 +1173,14 @@ TEST(HeapSnapshotObjectsStats) {
TestStatsStream stats_update = GetHeapStatsUpdate(heap_profiler);
CHECK_EQ(1, stats_update.intervals_count());
CHECK_EQ(1, stats_update.updates_written());
- CHECK_EQ(0, stats_update.entries_size());
- CHECK_EQ(0, stats_update.entries_count());
+ CHECK_EQ(0u, stats_update.entries_size());
+ CHECK_EQ(0u, stats_update.entries_count());
// The only string from the second interval was released.
CHECK_EQ(2, stats_update.first_interval_index());
}
v8::Local<v8::Array> array = v8::Array::New(env->GetIsolate());
- CHECK_EQ(0, array->Length());
+ CHECK_EQ(0u, array->Length());
// Force array's buffer allocation.
array->Set(2, v8_num(7));
@@ -1196,9 +1190,9 @@ TEST(HeapSnapshotObjectsStats) {
TestStatsStream stats_update = GetHeapStatsUpdate(heap_profiler);
CHECK_EQ(1, stats_update.intervals_count());
CHECK_EQ(1, stats_update.updates_written());
- CHECK_LT(0, entries_size = stats_update.entries_size());
+ CHECK_LT(0u, entries_size = stats_update.entries_size());
// They are the array and its buffer.
- CHECK_EQ(2, stats_update.entries_count());
+ CHECK_EQ(2u, stats_update.entries_count());
CHECK_EQ(8, stats_update.first_interval_index());
}
@@ -1213,7 +1207,7 @@ TEST(HeapSnapshotObjectsStats) {
// The second interval was changed because new buffer was allocated.
CHECK_EQ(2, stats_update.updates_written());
CHECK_LT(entries_size, stats_update.entries_size());
- CHECK_EQ(2, stats_update.entries_count());
+ CHECK_EQ(2u, stats_update.entries_count());
CHECK_EQ(8, stats_update.first_interval_index());
}
@@ -1240,7 +1234,7 @@ TEST(HeapObjectIds) {
for (int i = 0; i < kLength; i++) {
v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
- CHECK_NE(v8::HeapProfiler::kUnknownObjectId, static_cast<int>(id));
+ CHECK_NE(v8::HeapProfiler::kUnknownObjectId, id);
ids[i] = id;
}
@@ -1249,15 +1243,15 @@ TEST(HeapObjectIds) {
for (int i = 0; i < kLength; i++) {
v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
- CHECK_EQ(static_cast<int>(ids[i]), static_cast<int>(id));
+ CHECK_EQ(ids[i], id);
v8::Handle<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
- CHECK_EQ(objects[i], obj);
+ CHECK(objects[i]->Equals(obj));
}
heap_profiler->ClearObjectIds();
for (int i = 0; i < kLength; i++) {
v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
- CHECK_EQ(v8::HeapProfiler::kUnknownObjectId, static_cast<int>(id));
+ CHECK_EQ(v8::HeapProfiler::kUnknownObjectId, id);
v8::Handle<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
CHECK(obj.IsEmpty());
}
@@ -1273,7 +1267,7 @@ static void CheckChildrenIds(const v8::HeapSnapshot* snapshot,
const v8::HeapGraphEdge* prop = node->GetChild(i);
const v8::HeapGraphNode* child =
snapshot->GetNodeById(prop->GetToNode()->GetId());
- CHECK_EQ_SNAPSHOT_OBJECT_ID(prop->GetToNode()->GetId(), child->GetId());
+ CHECK_EQ(prop->GetToNode()->GetId(), child->GetId());
CHECK_EQ(prop->GetToNode(), child);
CheckChildrenIds(snapshot, child, level + 1, max_level);
}
@@ -1291,7 +1285,7 @@ TEST(HeapSnapshotGetNodeById) {
const v8::HeapGraphNode* root = snapshot->GetRoot();
CheckChildrenIds(snapshot, root, 0, 3);
// Check a big id, which should not exist yet.
- CHECK_EQ(NULL, snapshot->GetNodeById(0x1000000UL));
+ CHECK(!snapshot->GetNodeById(0x1000000UL));
}
@@ -1314,9 +1308,8 @@ TEST(HeapSnapshotGetSnapshotObjectId) {
CHECK(globalObjectHandle->IsObject());
v8::SnapshotObjectId id = heap_profiler->GetObjectId(globalObjectHandle);
- CHECK_NE(static_cast<int>(v8::HeapProfiler::kUnknownObjectId),
- id);
- CHECK_EQ(static_cast<int>(id), global_object->GetId());
+ CHECK_NE(v8::HeapProfiler::kUnknownObjectId, id);
+ CHECK_EQ(id, global_object->GetId());
}
@@ -1330,7 +1323,7 @@ TEST(HeapSnapshotUnknownSnapshotObjectId) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* node =
snapshot->GetNodeById(v8::HeapProfiler::kUnknownObjectId);
- CHECK_EQ(NULL, node);
+ CHECK(!node);
}
@@ -1366,7 +1359,7 @@ TEST(TakeHeapSnapshotAborting) {
const v8::HeapSnapshot* no_snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("abort"),
&aborting_control);
- CHECK_EQ(NULL, no_snapshot);
+ CHECK(!no_snapshot);
CHECK_EQ(snapshots_count, heap_profiler->GetSnapshotCount());
CHECK_GT(aborting_control.total(), aborting_control.done());
@@ -1376,7 +1369,7 @@ TEST(TakeHeapSnapshotAborting) {
&control);
CHECK(ValidateSnapshot(snapshot));
- CHECK_NE(NULL, snapshot);
+ CHECK(snapshot);
CHECK_EQ(snapshots_count + 1, heap_profiler->GetSnapshotCount());
CHECK_EQ(control.total(), control.done());
CHECK_GT(control.total(), 0);
@@ -1496,29 +1489,29 @@ TEST(HeapSnapshotRetainedObjectInfo) {
const v8::HeapGraphNode* native_group_aaa = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "aaa-group");
- CHECK_NE(NULL, native_group_aaa);
+ CHECK(native_group_aaa);
CHECK_EQ(1, native_group_aaa->GetChildrenCount());
const v8::HeapGraphNode* aaa = GetNode(
native_group_aaa, v8::HeapGraphNode::kNative, "aaa / 100 entries");
- CHECK_NE(NULL, aaa);
+ CHECK(aaa);
CHECK_EQ(2, aaa->GetChildrenCount());
const v8::HeapGraphNode* native_group_ccc = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "ccc-group");
const v8::HeapGraphNode* ccc = GetNode(
native_group_ccc, v8::HeapGraphNode::kNative, "ccc");
- CHECK_NE(NULL, ccc);
+ CHECK(ccc);
const v8::HeapGraphNode* n_AAA = GetNode(
aaa, v8::HeapGraphNode::kString, "AAA");
- CHECK_NE(NULL, n_AAA);
+ CHECK(n_AAA);
const v8::HeapGraphNode* n_BBB = GetNode(
aaa, v8::HeapGraphNode::kString, "BBB");
- CHECK_NE(NULL, n_BBB);
+ CHECK(n_BBB);
CHECK_EQ(1, ccc->GetChildrenCount());
const v8::HeapGraphNode* n_CCC = GetNode(
ccc, v8::HeapGraphNode::kString, "CCC");
- CHECK_NE(NULL, n_CCC);
+ CHECK(n_CCC);
CHECK_EQ(aaa, GetProperty(n_AAA, v8::HeapGraphEdge::kInternal, "native"));
CHECK_EQ(aaa, GetProperty(n_BBB, v8::HeapGraphEdge::kInternal, "native"));
@@ -1530,7 +1523,7 @@ class GraphWithImplicitRefs {
public:
static const int kObjectsCount = 4;
explicit GraphWithImplicitRefs(LocalContext* env) {
- CHECK_EQ(NULL, instance_);
+ CHECK(!instance_);
instance_ = this;
isolate_ = (*env)->GetIsolate();
for (int i = 0; i < kObjectsCount; i++) {
@@ -1611,12 +1604,12 @@ TEST(DeleteAllHeapSnapshots) {
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
heap_profiler->DeleteAllHeapSnapshots();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK_NE(NULL, heap_profiler->TakeHeapSnapshot(v8_str("1")));
+ CHECK(heap_profiler->TakeHeapSnapshot(v8_str("1")));
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
heap_profiler->DeleteAllHeapSnapshots();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK_NE(NULL, heap_profiler->TakeHeapSnapshot(v8_str("1")));
- CHECK_NE(NULL, heap_profiler->TakeHeapSnapshot(v8_str("2")));
+ CHECK(heap_profiler->TakeHeapSnapshot(v8_str("1")));
+ CHECK(heap_profiler->TakeHeapSnapshot(v8_str("2")));
CHECK_EQ(2, heap_profiler->GetSnapshotCount());
heap_profiler->DeleteAllHeapSnapshots();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
@@ -1645,35 +1638,35 @@ TEST(DeleteHeapSnapshot) {
const v8::HeapSnapshot* s1 =
heap_profiler->TakeHeapSnapshot(v8_str("1"));
- CHECK_NE(NULL, s1);
+ CHECK(s1);
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
unsigned uid1 = s1->GetUid();
CHECK_EQ(s1, FindHeapSnapshot(heap_profiler, uid1));
const_cast<v8::HeapSnapshot*>(s1)->Delete();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK_EQ(NULL, FindHeapSnapshot(heap_profiler, uid1));
+ CHECK(!FindHeapSnapshot(heap_profiler, uid1));
const v8::HeapSnapshot* s2 =
heap_profiler->TakeHeapSnapshot(v8_str("2"));
- CHECK_NE(NULL, s2);
+ CHECK(s2);
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
unsigned uid2 = s2->GetUid();
CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
CHECK_EQ(s2, FindHeapSnapshot(heap_profiler, uid2));
const v8::HeapSnapshot* s3 =
heap_profiler->TakeHeapSnapshot(v8_str("3"));
- CHECK_NE(NULL, s3);
+ CHECK(s3);
CHECK_EQ(2, heap_profiler->GetSnapshotCount());
unsigned uid3 = s3->GetUid();
CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid3));
CHECK_EQ(s3, FindHeapSnapshot(heap_profiler, uid3));
const_cast<v8::HeapSnapshot*>(s2)->Delete();
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
- CHECK_EQ(NULL, FindHeapSnapshot(heap_profiler, uid2));
+ CHECK(!FindHeapSnapshot(heap_profiler, uid2));
CHECK_EQ(s3, FindHeapSnapshot(heap_profiler, uid3));
const_cast<v8::HeapSnapshot*>(s3)->Delete();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK_EQ(NULL, FindHeapSnapshot(heap_profiler, uid3));
+ CHECK(!FindHeapSnapshot(heap_profiler, uid3));
}
@@ -1699,10 +1692,11 @@ TEST(GlobalObjectName) {
&name_resolver);
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
- CHECK_EQ("Object / Global object name" ,
- const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(global))->name());
+ CHECK(global);
+ CHECK_EQ(0,
+ strcmp("Object / Global object name",
+ const_cast<i::HeapEntry*>(
+ reinterpret_cast<const i::HeapEntry*>(global))->name()));
}
@@ -1717,13 +1711,13 @@ TEST(GlobalObjectFields) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* builtins =
GetProperty(global, v8::HeapGraphEdge::kInternal, "builtins");
- CHECK_NE(NULL, builtins);
+ CHECK(builtins);
const v8::HeapGraphNode* native_context =
GetProperty(global, v8::HeapGraphEdge::kInternal, "native_context");
- CHECK_NE(NULL, native_context);
+ CHECK(native_context);
const v8::HeapGraphNode* global_proxy =
GetProperty(global, v8::HeapGraphEdge::kInternal, "global_proxy");
- CHECK_NE(NULL, global_proxy);
+ CHECK(global_proxy);
}
@@ -1751,7 +1745,7 @@ TEST(NodesIteration) {
heap_profiler->TakeHeapSnapshot(v8_str("iteration"));
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
+ CHECK(global);
// Verify that we can find this object by iteration.
const int nodes_count = snapshot->GetNodesCount();
int count = 0;
@@ -1900,19 +1894,19 @@ TEST(FastCaseAccessors) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
+ CHECK(global);
const v8::HeapGraphNode* obj1 =
GetProperty(global, v8::HeapGraphEdge::kProperty, "obj1");
- CHECK_NE(NULL, obj1);
+ CHECK(obj1);
const v8::HeapGraphNode* func;
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get propWithGetter");
- CHECK_NE(NULL, func);
+ CHECK(func);
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set propWithGetter");
- CHECK_EQ(NULL, func);
+ CHECK(!func);
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set propWithSetter");
- CHECK_NE(NULL, func);
+ CHECK(func);
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get propWithSetter");
- CHECK_EQ(NULL, func);
+ CHECK(!func);
}
@@ -1945,15 +1939,15 @@ TEST(FastCaseRedefinedAccessors) {
heap_profiler->TakeHeapSnapshot(v8_str("fastCaseAccessors"));
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
+ CHECK(global);
const v8::HeapGraphNode* obj1 =
GetProperty(global, v8::HeapGraphEdge::kProperty, "obj1");
- CHECK_NE(NULL, obj1);
+ CHECK(obj1);
const v8::HeapGraphNode* func;
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get prop");
- CHECK_NE(NULL, func);
+ CHECK(func);
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set prop");
- CHECK_NE(NULL, func);
+ CHECK(func);
}
@@ -1975,19 +1969,19 @@ TEST(SlowCaseAccessors) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
+ CHECK(global);
const v8::HeapGraphNode* obj1 =
GetProperty(global, v8::HeapGraphEdge::kProperty, "obj1");
- CHECK_NE(NULL, obj1);
+ CHECK(obj1);
const v8::HeapGraphNode* func;
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get propWithGetter");
- CHECK_NE(NULL, func);
+ CHECK(func);
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set propWithGetter");
- CHECK_EQ(NULL, func);
+ CHECK(!func);
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set propWithSetter");
- CHECK_NE(NULL, func);
+ CHECK(func);
func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get propWithSetter");
- CHECK_EQ(NULL, func);
+ CHECK(!func);
}
@@ -2006,10 +2000,10 @@ TEST(HiddenPropertiesFastCase) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* c =
GetProperty(global, v8::HeapGraphEdge::kProperty, "c");
- CHECK_NE(NULL, c);
+ CHECK(c);
const v8::HeapGraphNode* hidden_props =
GetProperty(c, v8::HeapGraphEdge::kInternal, "hidden_properties");
- CHECK_EQ(NULL, hidden_props);
+ CHECK(!hidden_props);
v8::Handle<v8::Value> cHandle =
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "c"));
@@ -2021,10 +2015,10 @@ TEST(HiddenPropertiesFastCase) {
CHECK(ValidateSnapshot(snapshot));
global = GetGlobalObject(snapshot);
c = GetProperty(global, v8::HeapGraphEdge::kProperty, "c");
- CHECK_NE(NULL, c);
+ CHECK(c);
hidden_props = GetProperty(c, v8::HeapGraphEdge::kInternal,
"hidden_properties");
- CHECK_NE(NULL, hidden_props);
+ CHECK(hidden_props);
}
@@ -2040,31 +2034,31 @@ TEST(AccessorInfo) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* foo =
GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
- CHECK_NE(NULL, foo);
+ CHECK(foo);
const v8::HeapGraphNode* map =
GetProperty(foo, v8::HeapGraphEdge::kInternal, "map");
- CHECK_NE(NULL, map);
+ CHECK(map);
const v8::HeapGraphNode* descriptors =
GetProperty(map, v8::HeapGraphEdge::kInternal, "descriptors");
- CHECK_NE(NULL, descriptors);
+ CHECK(descriptors);
const v8::HeapGraphNode* length_name =
GetProperty(descriptors, v8::HeapGraphEdge::kInternal, "2");
- CHECK_NE(NULL, length_name);
- CHECK_EQ("length", *v8::String::Utf8Value(length_name->GetName()));
+ CHECK(length_name);
+ CHECK_EQ(0, strcmp("length", *v8::String::Utf8Value(length_name->GetName())));
const v8::HeapGraphNode* length_accessor =
GetProperty(descriptors, v8::HeapGraphEdge::kInternal, "4");
- CHECK_NE(NULL, length_accessor);
- CHECK_EQ("system / ExecutableAccessorInfo",
- *v8::String::Utf8Value(length_accessor->GetName()));
+ CHECK(length_accessor);
+ CHECK_EQ(0, strcmp("system / ExecutableAccessorInfo",
+ *v8::String::Utf8Value(length_accessor->GetName())));
const v8::HeapGraphNode* name =
GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "name");
- CHECK_NE(NULL, name);
+ CHECK(name);
const v8::HeapGraphNode* getter =
GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "getter");
- CHECK_NE(NULL, getter);
+ CHECK(getter);
const v8::HeapGraphNode* setter =
GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "setter");
- CHECK_NE(NULL, setter);
+ CHECK(setter);
}
@@ -2085,10 +2079,10 @@ bool HasWeakGlobalHandle() {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* gc_roots = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "(GC roots)");
- CHECK_NE(NULL, gc_roots);
+ CHECK(gc_roots);
const v8::HeapGraphNode* global_handles = GetNode(
gc_roots, v8::HeapGraphNode::kSynthetic, "(Global handles)");
- CHECK_NE(NULL, global_handles);
+ CHECK(global_handles);
return HasWeakEdge(global_handles);
}
@@ -2125,7 +2119,7 @@ TEST(SfiAndJsFunctionWeakRefs) {
heap_profiler->TakeHeapSnapshot(v8_str("fun"));
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
+ CHECK(global);
const v8::HeapGraphNode* fun =
GetProperty(global, v8::HeapGraphEdge::kProperty, "fun");
CHECK(!HasWeakEdge(fun));
@@ -2154,7 +2148,7 @@ TEST(NoDebugObjectInSnapshot) {
const v8::HeapGraphNode* global = edge->GetToNode();
const v8::HeapGraphNode* foo =
GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
- CHECK_NE(NULL, foo);
+ CHECK(foo);
}
}
CHECK_EQ(1, globals_count);
@@ -2172,10 +2166,10 @@ TEST(AllStrongGcRootsHaveNames) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* gc_roots = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "(GC roots)");
- CHECK_NE(NULL, gc_roots);
+ CHECK(gc_roots);
const v8::HeapGraphNode* strong_roots = GetNode(
gc_roots, v8::HeapGraphNode::kSynthetic, "(Strong roots)");
- CHECK_NE(NULL, strong_roots);
+ CHECK(strong_roots);
for (int i = 0; i < strong_roots->GetChildrenCount(); ++i) {
const v8::HeapGraphEdge* edge = strong_roots->GetChild(i);
CHECK_EQ(v8::HeapGraphEdge::kInternal, edge->GetType());
@@ -2196,13 +2190,13 @@ TEST(NoRefsToNonEssentialEntries) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
GetProperty(global, v8::HeapGraphEdge::kProperty, "global_object");
- CHECK_NE(NULL, global_object);
+ CHECK(global_object);
const v8::HeapGraphNode* properties =
GetProperty(global_object, v8::HeapGraphEdge::kInternal, "properties");
- CHECK_EQ(NULL, properties);
+ CHECK(!properties);
const v8::HeapGraphNode* elements =
GetProperty(global_object, v8::HeapGraphEdge::kInternal, "elements");
- CHECK_EQ(NULL, elements);
+ CHECK(!elements);
}
@@ -2217,17 +2211,17 @@ TEST(MapHasDescriptorsAndTransitions) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
GetProperty(global, v8::HeapGraphEdge::kProperty, "obj");
- CHECK_NE(NULL, global_object);
+ CHECK(global_object);
const v8::HeapGraphNode* map =
GetProperty(global_object, v8::HeapGraphEdge::kInternal, "map");
- CHECK_NE(NULL, map);
+ CHECK(map);
const v8::HeapGraphNode* own_descriptors = GetProperty(
map, v8::HeapGraphEdge::kInternal, "descriptors");
- CHECK_NE(NULL, own_descriptors);
+ CHECK(own_descriptors);
const v8::HeapGraphNode* own_transitions = GetProperty(
map, v8::HeapGraphEdge::kInternal, "transitions");
- CHECK_EQ(NULL, own_transitions);
+ CHECK(!own_transitions);
}
@@ -2255,13 +2249,13 @@ TEST(ManyLocalsInSharedContext) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
+ CHECK(global);
const v8::HeapGraphNode* ok_object =
GetProperty(global, v8::HeapGraphEdge::kProperty, "ok");
- CHECK_NE(NULL, ok_object);
+ CHECK(ok_object);
const v8::HeapGraphNode* context_object =
GetProperty(ok_object, v8::HeapGraphEdge::kInternal, "context");
- CHECK_NE(NULL, context_object);
+ CHECK(context_object);
// Check the objects are not duplicated in the context.
CHECK_EQ(v8::internal::Context::MIN_CONTEXT_SLOTS + num_objects - 1,
context_object->GetChildrenCount());
@@ -2272,7 +2266,7 @@ TEST(ManyLocalsInSharedContext) {
i::SNPrintF(var_name, "f_%d", i);
const v8::HeapGraphNode* f_object = GetProperty(
context_object, v8::HeapGraphEdge::kContextVariable, var_name.start());
- CHECK_NE(NULL, f_object);
+ CHECK(f_object);
}
}
@@ -2290,13 +2284,13 @@ TEST(AllocationSitesAreVisible) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
+ CHECK(global);
const v8::HeapGraphNode* fun_code =
GetProperty(global, v8::HeapGraphEdge::kProperty, "fun");
- CHECK_NE(NULL, fun_code);
+ CHECK(fun_code);
const v8::HeapGraphNode* literals =
GetProperty(fun_code, v8::HeapGraphEdge::kInternal, "literals");
- CHECK_NE(NULL, literals);
+ CHECK(literals);
CHECK_EQ(v8::HeapGraphNode::kArray, literals->GetType());
CHECK_EQ(2, literals->GetChildrenCount());
@@ -2305,16 +2299,16 @@ TEST(AllocationSitesAreVisible) {
const v8::HeapGraphEdge* prop = literals->GetChild(1);
const v8::HeapGraphNode* allocation_site = prop->GetToNode();
v8::String::Utf8Value name(allocation_site->GetName());
- CHECK_EQ("system / AllocationSite", *name);
+ CHECK_EQ(0, strcmp("system / AllocationSite", *name));
const v8::HeapGraphNode* transition_info =
GetProperty(allocation_site, v8::HeapGraphEdge::kInternal,
"transition_info");
- CHECK_NE(NULL, transition_info);
+ CHECK(transition_info);
const v8::HeapGraphNode* elements =
GetProperty(transition_info, v8::HeapGraphEdge::kInternal,
"elements");
- CHECK_NE(NULL, elements);
+ CHECK(elements);
CHECK_EQ(v8::HeapGraphNode::kArray, elements->GetType());
CHECK_EQ(v8::internal::FixedArray::SizeFor(3),
static_cast<int>(elements->GetShallowSize()));
@@ -2324,13 +2318,13 @@ TEST(AllocationSitesAreVisible) {
CHECK(array_val->IsArray());
v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(array_val);
// Verify the array is "a" in the code above.
- CHECK_EQ(3, array->Length());
- CHECK_EQ(v8::Integer::New(isolate, 3),
- array->Get(v8::Integer::New(isolate, 0)));
- CHECK_EQ(v8::Integer::New(isolate, 2),
- array->Get(v8::Integer::New(isolate, 1)));
- CHECK_EQ(v8::Integer::New(isolate, 1),
- array->Get(v8::Integer::New(isolate, 2)));
+ CHECK_EQ(3u, array->Length());
+ CHECK(v8::Integer::New(isolate, 3)
+ ->Equals(array->Get(v8::Integer::New(isolate, 0))));
+ CHECK(v8::Integer::New(isolate, 2)
+ ->Equals(array->Get(v8::Integer::New(isolate, 1))));
+ CHECK(v8::Integer::New(isolate, 1)
+ ->Equals(array->Get(v8::Integer::New(isolate, 2))));
}
@@ -2345,10 +2339,10 @@ TEST(JSFunctionHasCodeLink) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* foo_func =
GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
- CHECK_NE(NULL, foo_func);
+ CHECK(foo_func);
const v8::HeapGraphNode* code =
GetProperty(foo_func, v8::HeapGraphEdge::kInternal, "code");
- CHECK_NE(NULL, code);
+ CHECK(code);
}
@@ -2393,22 +2387,19 @@ TEST(CheckCodeNames) {
};
const v8::HeapGraphNode* node = GetNodeByPath(snapshot,
stub_path, arraysize(stub_path));
- CHECK_NE(NULL, node);
+ CHECK(node);
- const char* builtin_path1[] = {
- "::(GC roots)",
- "::(Builtins)",
- "::(KeyedLoadIC_Generic builtin)"
- };
+ const char* builtin_path1[] = {"::(GC roots)", "::(Builtins)",
+ "::(KeyedLoadIC_Megamorphic builtin)"};
node = GetNodeByPath(snapshot, builtin_path1, arraysize(builtin_path1));
- CHECK_NE(NULL, node);
+ CHECK(node);
const char* builtin_path2[] = {"::(GC roots)", "::(Builtins)",
"::(CompileLazy builtin)"};
node = GetNodeByPath(snapshot, builtin_path2, arraysize(builtin_path2));
- CHECK_NE(NULL, node);
+ CHECK(node);
v8::String::Utf8Value node_name(node->GetName());
- CHECK_EQ("(CompileLazy builtin)", *node_name);
+ CHECK_EQ(0, strcmp("(CompileLazy builtin)", *node_name));
}
@@ -2491,7 +2482,7 @@ TEST(ArrayGrowLeftTrim) {
const char* names[] = {""};
AllocationTracker* tracker =
reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
- CHECK_NE(NULL, tracker);
+ CHECK(tracker);
// Resolve all function locations.
tracker->PrepareForSerialization();
// Print for better diagnostics in case of failure.
@@ -2499,9 +2490,9 @@ TEST(ArrayGrowLeftTrim) {
AllocationTraceNode* node =
FindNode(tracker, Vector<const char*>(names, arraysize(names)));
- CHECK_NE(NULL, node);
- CHECK_GE(node->allocation_count(), 2);
- CHECK_GE(node->allocation_size(), 4 * 5);
+ CHECK(node);
+ CHECK_GE(node->allocation_count(), 2u);
+ CHECK_GE(node->allocation_size(), 4u * 5u);
heap_profiler->StopTrackingHeapObjects();
}
@@ -2517,7 +2508,7 @@ TEST(TrackHeapAllocations) {
AllocationTracker* tracker =
reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
- CHECK_NE(NULL, tracker);
+ CHECK(tracker);
// Resolve all function locations.
tracker->PrepareForSerialization();
// Print for better diagnostics in case of failure.
@@ -2526,8 +2517,8 @@ TEST(TrackHeapAllocations) {
const char* names[] = {"", "start", "f_0_0", "f_0_1", "f_0_2"};
AllocationTraceNode* node =
FindNode(tracker, Vector<const char*>(names, arraysize(names)));
- CHECK_NE(NULL, node);
- CHECK_GE(node->allocation_count(), 100);
+ CHECK(node);
+ CHECK_GE(node->allocation_count(), 100u);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
heap_profiler->StopTrackingHeapObjects();
}
@@ -2567,7 +2558,7 @@ TEST(TrackBumpPointerAllocations) {
AllocationTracker* tracker =
reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
- CHECK_NE(NULL, tracker);
+ CHECK(tracker);
// Resolve all function locations.
tracker->PrepareForSerialization();
// Print for better diagnostics in case of failure.
@@ -2575,8 +2566,8 @@ TEST(TrackBumpPointerAllocations) {
AllocationTraceNode* node =
FindNode(tracker, Vector<const char*>(names, arraysize(names)));
- CHECK_NE(NULL, node);
- CHECK_GE(node->allocation_count(), 100);
+ CHECK(node);
+ CHECK_GE(node->allocation_count(), 100u);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
heap_profiler->StopTrackingHeapObjects();
}
@@ -2593,7 +2584,7 @@ TEST(TrackBumpPointerAllocations) {
AllocationTracker* tracker =
reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
- CHECK_NE(NULL, tracker);
+ CHECK(tracker);
// Resolve all function locations.
tracker->PrepareForSerialization();
// Print for better diagnostics in case of failure.
@@ -2601,8 +2592,8 @@ TEST(TrackBumpPointerAllocations) {
AllocationTraceNode* node =
FindNode(tracker, Vector<const char*>(names, arraysize(names)));
- CHECK_NE(NULL, node);
- CHECK_LT(node->allocation_count(), 100);
+ CHECK(node);
+ CHECK_LT(node->allocation_count(), 100u);
CcTest::heap()->DisableInlineAllocation();
heap_profiler->StopTrackingHeapObjects();
@@ -2623,7 +2614,7 @@ TEST(TrackV8ApiAllocation) {
AllocationTracker* tracker =
reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
- CHECK_NE(NULL, tracker);
+ CHECK(tracker);
// Resolve all function locations.
tracker->PrepareForSerialization();
// Print for better diagnostics in case of failure.
@@ -2631,8 +2622,8 @@ TEST(TrackV8ApiAllocation) {
AllocationTraceNode* node =
FindNode(tracker, Vector<const char*>(names, arraysize(names)));
- CHECK_NE(NULL, node);
- CHECK_GE(node->allocation_count(), 2);
+ CHECK(node);
+ CHECK_GE(node->allocation_count(), 2u);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
heap_profiler->StopTrackingHeapObjects();
}
@@ -2649,16 +2640,16 @@ TEST(ArrayBufferAndArrayBufferView) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* arr1_obj =
GetProperty(global, v8::HeapGraphEdge::kProperty, "arr1");
- CHECK_NE(NULL, arr1_obj);
+ CHECK(arr1_obj);
const v8::HeapGraphNode* arr1_buffer =
GetProperty(arr1_obj, v8::HeapGraphEdge::kInternal, "buffer");
- CHECK_NE(NULL, arr1_buffer);
+ CHECK(arr1_buffer);
const v8::HeapGraphNode* first_view =
GetProperty(arr1_buffer, v8::HeapGraphEdge::kWeak, "weak_first_view");
- CHECK_NE(NULL, first_view);
+ CHECK(first_view);
const v8::HeapGraphNode* backing_store =
GetProperty(arr1_buffer, v8::HeapGraphEdge::kInternal, "backing_store");
- CHECK_NE(NULL, backing_store);
+ CHECK(backing_store);
CHECK_EQ(400, static_cast<int>(backing_store->GetShallowSize()));
}
@@ -2708,16 +2699,16 @@ TEST(ArrayBufferSharedBackingStore) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* ab1_node =
GetProperty(global, v8::HeapGraphEdge::kProperty, "ab1");
- CHECK_NE(NULL, ab1_node);
+ CHECK(ab1_node);
const v8::HeapGraphNode* ab1_data =
GetProperty(ab1_node, v8::HeapGraphEdge::kInternal, "backing_store");
- CHECK_NE(NULL, ab1_data);
+ CHECK(ab1_data);
const v8::HeapGraphNode* ab2_node =
GetProperty(global, v8::HeapGraphEdge::kProperty, "ab2");
- CHECK_NE(NULL, ab2_node);
+ CHECK(ab2_node);
const v8::HeapGraphNode* ab2_data =
GetProperty(ab2_node, v8::HeapGraphEdge::kInternal, "backing_store");
- CHECK_NE(NULL, ab2_data);
+ CHECK(ab2_data);
CHECK_EQ(ab1_data, ab2_data);
CHECK_EQ(2, GetRetainersCount(snapshot, ab1_data));
free(data);
@@ -2743,12 +2734,12 @@ TEST(BoxObject) {
const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
const v8::HeapGraphNode* box_node =
GetProperty(global_node, v8::HeapGraphEdge::kElement, "0");
- CHECK_NE(NULL, box_node);
+ CHECK(box_node);
v8::String::Utf8Value box_node_name(box_node->GetName());
- CHECK_EQ("system / Box", *box_node_name);
+ CHECK_EQ(0, strcmp("system / Box", *box_node_name));
const v8::HeapGraphNode* box_value =
GetProperty(box_node, v8::HeapGraphEdge::kInternal, "value");
- CHECK_NE(NULL, box_value);
+ CHECK(box_value);
}
@@ -2771,10 +2762,10 @@ TEST(WeakContainers) {
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* obj =
GetProperty(global, v8::HeapGraphEdge::kProperty, "obj");
- CHECK_NE(NULL, obj);
+ CHECK(obj);
const v8::HeapGraphNode* map =
GetProperty(obj, v8::HeapGraphEdge::kInternal, "map");
- CHECK_NE(NULL, map);
+ CHECK(map);
const v8::HeapGraphNode* dependent_code =
GetProperty(map, v8::HeapGraphEdge::kInternal, "dependent_code");
if (!dependent_code) return;
@@ -2795,53 +2786,53 @@ static inline i::Address ToAddress(int n) {
TEST(AddressToTraceMap) {
i::AddressToTraceMap map;
- CHECK_EQ(0, map.GetTraceNodeId(ToAddress(150)));
+ CHECK_EQ(0u, map.GetTraceNodeId(ToAddress(150)));
// [0x100, 0x200) -> 1
map.AddRange(ToAddress(0x100), 0x100, 1U);
- CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x50)));
- CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x100)));
- CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x150)));
- CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x100 + 0x100)));
- CHECK_EQ(1, static_cast<int>(map.size()));
+ CHECK_EQ(0u, map.GetTraceNodeId(ToAddress(0x50)));
+ CHECK_EQ(1u, map.GetTraceNodeId(ToAddress(0x100)));
+ CHECK_EQ(1u, map.GetTraceNodeId(ToAddress(0x150)));
+ CHECK_EQ(0u, map.GetTraceNodeId(ToAddress(0x100 + 0x100)));
+ CHECK_EQ(1u, map.size());
// [0x100, 0x200) -> 1, [0x200, 0x300) -> 2
map.AddRange(ToAddress(0x200), 0x100, 2U);
- CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x2a0)));
- CHECK_EQ(2, static_cast<int>(map.size()));
+ CHECK_EQ(2u, map.GetTraceNodeId(ToAddress(0x2a0)));
+ CHECK_EQ(2u, map.size());
// [0x100, 0x180) -> 1, [0x180, 0x280) -> 3, [0x280, 0x300) -> 2
map.AddRange(ToAddress(0x180), 0x100, 3U);
- CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x17F)));
- CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x280)));
- CHECK_EQ(3, map.GetTraceNodeId(ToAddress(0x180)));
- CHECK_EQ(3, static_cast<int>(map.size()));
+ CHECK_EQ(1u, map.GetTraceNodeId(ToAddress(0x17F)));
+ CHECK_EQ(2u, map.GetTraceNodeId(ToAddress(0x280)));
+ CHECK_EQ(3u, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(3u, map.size());
// [0x100, 0x180) -> 1, [0x180, 0x280) -> 3, [0x280, 0x300) -> 2,
// [0x400, 0x500) -> 4
map.AddRange(ToAddress(0x400), 0x100, 4U);
- CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x17F)));
- CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x280)));
- CHECK_EQ(3, map.GetTraceNodeId(ToAddress(0x180)));
- CHECK_EQ(4, map.GetTraceNodeId(ToAddress(0x450)));
- CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x500)));
- CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x350)));
- CHECK_EQ(4, static_cast<int>(map.size()));
+ CHECK_EQ(1u, map.GetTraceNodeId(ToAddress(0x17F)));
+ CHECK_EQ(2u, map.GetTraceNodeId(ToAddress(0x280)));
+ CHECK_EQ(3u, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(4u, map.GetTraceNodeId(ToAddress(0x450)));
+ CHECK_EQ(0u, map.GetTraceNodeId(ToAddress(0x500)));
+ CHECK_EQ(0u, map.GetTraceNodeId(ToAddress(0x350)));
+ CHECK_EQ(4u, map.size());
// [0x100, 0x180) -> 1, [0x180, 0x200) -> 3, [0x200, 0x600) -> 5
map.AddRange(ToAddress(0x200), 0x400, 5U);
- CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x200)));
- CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x400)));
- CHECK_EQ(3, static_cast<int>(map.size()));
+ CHECK_EQ(5u, map.GetTraceNodeId(ToAddress(0x200)));
+ CHECK_EQ(5u, map.GetTraceNodeId(ToAddress(0x400)));
+ CHECK_EQ(3u, map.size());
// [0x100, 0x180) -> 1, [0x180, 0x200) -> 7, [0x200, 0x600) ->5
map.AddRange(ToAddress(0x180), 0x80, 6U);
map.AddRange(ToAddress(0x180), 0x80, 7U);
- CHECK_EQ(7, map.GetTraceNodeId(ToAddress(0x180)));
- CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x200)));
- CHECK_EQ(3, static_cast<int>(map.size()));
+ CHECK_EQ(7u, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(5u, map.GetTraceNodeId(ToAddress(0x200)));
+ CHECK_EQ(3u, map.size());
map.Clear();
- CHECK_EQ(0, static_cast<int>(map.size()));
- CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x400)));
+ CHECK_EQ(0u, map.size());
+ CHECK_EQ(0u, map.GetTraceNodeId(ToAddress(0x400)));
}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index f8a7df20c6..ae3c1d365c 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -36,6 +36,7 @@
#include "src/global-handles.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/snapshot.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -160,8 +161,7 @@ TEST(HeapObjects) {
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Handle<Smi>::cast(value)->value());
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64) && \
- !defined(V8_TARGET_ARCH_MIPS64)
+#if !defined(V8_TARGET_ARCH_64_BIT)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = factory->NewNumberFromInt(Smi::kMinValue - 1);
CHECK(value->IsHeapNumber());
@@ -650,7 +650,7 @@ TEST(ObjectProperties) {
CHECK(maybe.value);
// delete first
- JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION).Check();
+ JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
maybe = JSReceiver::HasOwnProperty(obj, first);
CHECK(maybe.has_value);
CHECK(!maybe.value);
@@ -666,11 +666,11 @@ TEST(ObjectProperties) {
CHECK(maybe.value);
// delete first and then second
- JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION).Check();
+ JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
maybe = JSReceiver::HasOwnProperty(obj, second);
CHECK(maybe.has_value);
CHECK(maybe.value);
- JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION).Check();
+ JSReceiver::DeleteProperty(obj, second, SLOPPY).Check();
maybe = JSReceiver::HasOwnProperty(obj, first);
CHECK(maybe.has_value);
CHECK(!maybe.value);
@@ -689,11 +689,11 @@ TEST(ObjectProperties) {
CHECK(maybe.value);
// delete second and then first
- JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION).Check();
+ JSReceiver::DeleteProperty(obj, second, SLOPPY).Check();
maybe = JSReceiver::HasOwnProperty(obj, first);
CHECK(maybe.has_value);
CHECK(maybe.value);
- JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION).Check();
+ JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
maybe = JSReceiver::HasOwnProperty(obj, first);
CHECK(maybe.has_value);
CHECK(!maybe.value);
@@ -1387,6 +1387,8 @@ TEST(CompilationCacheCachingBehavior) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
CompilationCache* compilation_cache = isolate->compilation_cache();
+ LanguageMode language_mode =
+ construct_language_mode(FLAG_use_strict, FLAG_use_strong);
v8::HandleScope scope(CcTest::isolate());
const char* raw_source =
@@ -1407,7 +1409,8 @@ TEST(CompilationCacheCachingBehavior) {
// On first compilation, only a hash is inserted in the code cache. We can't
// find that value.
MaybeHandle<SharedFunctionInfo> info = compilation_cache->LookupScript(
- source, Handle<Object>(), 0, 0, true, native_context);
+ source, Handle<Object>(), 0, 0, false, true, native_context,
+ language_mode);
CHECK(info.is_null());
{
@@ -1417,16 +1420,16 @@ TEST(CompilationCacheCachingBehavior) {
// On second compilation, the hash is replaced by a real cache entry mapping
// the source to the shared function info containing the code.
- info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, true,
- native_context);
+ info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, false,
+ true, native_context, language_mode);
CHECK(!info.is_null());
heap->CollectAllGarbage(Heap::kNoGCFlags);
// On second compilation, the hash is replaced by a real cache entry mapping
// the source to the shared function info containing the code.
- info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, true,
- native_context);
+ info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, false,
+ true, native_context, language_mode);
CHECK(!info.is_null());
while (!info.ToHandleChecked()->code()->IsOld()) {
@@ -1435,8 +1438,8 @@ TEST(CompilationCacheCachingBehavior) {
heap->CollectAllGarbage(Heap::kNoGCFlags);
// Ensure code aging cleared the entry from the cache.
- info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, true,
- native_context);
+ info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, false,
+ true, native_context, language_mode);
CHECK(info.is_null());
{
@@ -1446,8 +1449,8 @@ TEST(CompilationCacheCachingBehavior) {
// On first compilation, only a hash is inserted in the code cache. We can't
// find that value.
- info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, true,
- native_context);
+ info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, false,
+ true, native_context, language_mode);
CHECK(info.is_null());
for (int i = 0; i < CompilationCacheTable::kHashGenerations; i++) {
@@ -1461,8 +1464,8 @@ TEST(CompilationCacheCachingBehavior) {
// If we aged the cache before caching the script, ensure that we didn't cache
// on next compilation.
- info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, true,
- native_context);
+ info = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0, false,
+ true, native_context, language_mode);
CHECK(info.is_null());
}
@@ -2207,11 +2210,8 @@ TEST(PrototypeTransitionClearing) {
// Verify that prototype transitions array was compacted.
FixedArray* trans = baseObject->map()->GetPrototypeTransitions();
for (int i = initialTransitions; i < initialTransitions + transitions; i++) {
- int j = Map::kProtoTransitionHeaderSize +
- i * Map::kProtoTransitionElementsPerEntry;
- CHECK(trans->get(j + Map::kProtoTransitionMapOffset)->IsMap());
- Object* proto = trans->get(j + Map::kProtoTransitionPrototypeOffset);
- CHECK(proto->IsJSObject());
+ int j = Map::kProtoTransitionHeaderSize + i;
+ CHECK(trans->get(j)->IsMap());
}
// Make sure next prototype is placed on an old-space evacuation candidate.
@@ -3320,7 +3320,7 @@ TEST(Regress2211) {
}
-TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
+TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -3355,16 +3355,16 @@ TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
CHECK_EQ(expected_slots, feedback_vector->ICSlots());
int slot1 = 0;
int slot2 = 1;
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot1))->IsJSFunction());
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot2))->IsJSFunction());
+ CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot1))->IsWeakCell());
+ CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot2))->IsWeakCell());
SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK_EQ(feedback_vector->Get(FeedbackVectorICSlot(slot1)),
- *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
- CHECK_EQ(feedback_vector->Get(FeedbackVectorICSlot(slot2)),
- *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
+ CHECK(!WeakCell::cast(feedback_vector->Get(FeedbackVectorICSlot(slot1)))
+ ->cleared());
+ CHECK(!WeakCell::cast(feedback_vector->Get(FeedbackVectorICSlot(slot2)))
+ ->cleared());
}
@@ -3453,10 +3453,8 @@ TEST(IncrementalMarkingClearsMonomorphicIC) {
// originating from a different native context.
CcTest::global()->Set(v8_str("obj1"), obj1);
CompileRun("function f(o) { return o.x; } f(obj1); f(obj1);");
- Handle<JSFunction> f =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(
- CcTest::global()->Get(v8_str("f"))));
+ Handle<JSFunction> f = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
if (FLAG_vector_ics) {
@@ -3552,10 +3550,8 @@ TEST(IncrementalMarkingClearsPolymorphicIC) {
CcTest::global()->Set(v8_str("obj1"), obj1);
CcTest::global()->Set(v8_str("obj2"), obj2);
CompileRun("function f(o) { return o.x; } f(obj1); f(obj1); f(obj2);");
- Handle<JSFunction> f =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(
- CcTest::global()->Get(v8_str("f"))));
+ Handle<JSFunction> f = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
if (FLAG_vector_ics) {
@@ -3872,21 +3868,6 @@ TEST(Regress169209) {
}
-// Helper function that simulates a fill new-space in the heap.
-static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
- int extra_bytes) {
- int space_remaining = static_cast<int>(
- *space->allocation_limit_address() - *space->allocation_top_address());
- CHECK(space_remaining >= extra_bytes);
- int new_linear_size = space_remaining - extra_bytes;
- v8::internal::AllocationResult allocation =
- space->AllocateRaw(new_linear_size);
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), new_linear_size);
-}
-
-
TEST(Regress169928) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_crankshaft = false;
@@ -4187,8 +4168,9 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
DependentCode::GroupStartIndexes starts(site->dependent_code());
CHECK_GE(starts.number_of_entries(), 1);
int index = starts.at(DependentCode::kAllocationSiteTransitionChangedGroup);
- CHECK(site->dependent_code()->is_code_at(index));
- Code* function_bar = site->dependent_code()->code_at(index);
+ CHECK(site->dependent_code()->object_at(index)->IsWeakCell());
+ Code* function_bar = Code::cast(
+ WeakCell::cast(site->dependent_code()->object_at(index))->value());
Handle<JSFunction> bar_handle =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
@@ -4206,7 +4188,8 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// longer referred to by dependent_code().
DependentCode::GroupStartIndexes starts(site->dependent_code());
int index = starts.at(DependentCode::kAllocationSiteTransitionChangedGroup);
- CHECK(!(site->dependent_code()->is_code_at(index)));
+ CHECK(site->dependent_code()->object_at(index)->IsWeakCell() &&
+ WeakCell::cast(site->dependent_code()->object_at(index))->cleared());
}
@@ -5089,6 +5072,17 @@ TEST(Regress442710) {
}
+TEST(NumberStringCacheSize) {
+ if (!Snapshot::HaveASnapshotToStartFrom()) return;
+ // Test that the number-string cache has not been resized in the snapshot.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ CHECK_EQ(TestHeap::kInitialNumberStringCacheSize * 2,
+ heap->number_string_cache()->length());
+}
+
+
#ifdef DEBUG
TEST(PathTracer) {
CcTest::InitializeVM();
@@ -5099,3 +5093,25 @@ TEST(PathTracer) {
CcTest::i_isolate()->heap()->TracePathToObject(*o);
}
#endif // DEBUG
+
+
+TEST(FirstPageFitsStartup) {
+ // Test that the first page sizes provided by the default snapshot are large
+ // enough to fit everything right after startup and creating one context.
+ // If this test fails, we are allocating too much aside from deserialization.
+ if (!Snapshot::HaveASnapshotToStartFrom()) return;
+ if (Snapshot::EmbedsScript()) return;
+ CcTest::InitializeVM();
+ LocalContext env;
+ PagedSpaces spaces(CcTest::heap());
+ for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+ uint32_t default_size = s->AreaSize();
+ uint32_t reduced_size = Snapshot::SizeOfFirstPage(s->identity());
+ if (reduced_size == default_size) continue;
+ int counter = 0;
+ Page* page = NULL;
+ for (PageIterator it(s); it.has_next(); page = it.next()) counter++;
+ CHECK_LE(counter, 1);
+ CHECK(static_cast<uint32_t>(page->area_size()) == reduced_size);
+ }
+}
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index 6a5f0b2997..8419dc5a43 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -95,7 +95,7 @@ void CompareStringsOneWay(const char* s1, const char* s2,
int expected_diff_parameter = -1) {
StringCompareInput input(s1, s2);
- Zone zone(CcTest::i_isolate());
+ Zone zone;
DiffChunkStruct* first_chunk;
ListDiffOutputWriter writer(&first_chunk, &zone);
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index dc5404e92a..c909a02125 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -67,7 +67,8 @@ class KangarooThread : public v8::base::Thread {
{
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
- CHECK_EQ(isolate_, v8::internal::Isolate::Current());
+ CHECK_EQ(reinterpret_cast<v8::internal::Isolate*>(isolate_),
+ v8::internal::Isolate::Current());
v8::HandleScope scope(isolate_);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
@@ -106,7 +107,8 @@ TEST(KangarooIsolates) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- CHECK_EQ(isolate, v8::internal::Isolate::Current());
+ CHECK_EQ(reinterpret_cast<v8::internal::Isolate*>(isolate),
+ v8::internal::Isolate::Current());
CompileRun("function getValue() { return 30; }");
thread1.Reset(new KangarooThread(isolate, context));
}
@@ -184,7 +186,8 @@ class IsolateLockingThreadWithLocalContext : public JoinableThread {
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
LocalContext local_context(isolate_);
- CHECK_EQ(isolate_, v8::internal::Isolate::Current());
+ CHECK_EQ(reinterpret_cast<v8::internal::Isolate*>(isolate_),
+ v8::internal::Isolate::Current());
CalcFibAndCheck();
}
private:
@@ -221,42 +224,6 @@ TEST(IsolateLockingStress) {
isolate->Dispose();
}
-class IsolateNonlockingThread : public JoinableThread {
- public:
- IsolateNonlockingThread() : JoinableThread("IsolateNonlockingThread") {}
-
- virtual void Run() {
- v8::Isolate* isolate = v8::Isolate::New();
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- CHECK_EQ(isolate, v8::internal::Isolate::Current());
- CalcFibAndCheck();
- }
- isolate->Dispose();
- }
- private:
-};
-
-
-// Run many threads each accessing its own isolate without locking
-TEST(MultithreadedParallelIsolates) {
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
- const int kNThreads = 10;
-#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
- const int kNThreads = 4;
-#else
- const int kNThreads = 50;
-#endif
- i::List<JoinableThread*> threads(kNThreads);
- for (int i = 0; i < kNThreads; i++) {
- threads.Add(new IsolateNonlockingThread());
- }
- StartJoinAndDeleteThreads(threads);
-}
-
class IsolateNestedLockingThread : public JoinableThread {
public:
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 714ad69c8d..e4ca28212d 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -276,11 +276,11 @@ TEST(JsEntrySp) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
v8::Context::Scope context_scope(context);
- CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
+ CHECK(!i::TraceExtension::GetJsEntrySp());
CompileRun("a = 1; b = a + 1;");
- CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
+ CHECK(!i::TraceExtension::GetJsEntrySp());
CompileRun("js_entry_sp();");
- CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
+ CHECK(!i::TraceExtension::GetJsEntrySp());
CompileRun("js_entry_sp_level2();");
- CHECK_EQ(0, i::TraceExtension::GetJsEntrySp());
+ CHECK(!i::TraceExtension::GetJsEntrySp());
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index eee3e1341a..4b676d2e05 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -86,7 +86,7 @@ class ScopedLoggerInitializer {
FILE* StopLoggingGetTempFile() {
temp_file_ = logger_->TearDown();
- CHECK_NE(NULL, temp_file_);
+ CHECK(temp_file_);
fflush(temp_file_);
rewind(temp_file_);
return temp_file_;
@@ -368,7 +368,7 @@ TEST(LogCallbacks) {
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"method1\"",
reinterpret_cast<intptr_t>(ObjMethod1));
- CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
+ CHECK(StrNStr(log.start(), ref_data.start(), log.length()));
log.Dispose();
}
isolate->Dispose();
@@ -414,22 +414,19 @@ TEST(LogAccessorCallbacks) {
i::SNPrintF(prop1_getter_record,
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop1\"",
reinterpret_cast<intptr_t>(Prop1Getter));
- CHECK_NE(NULL,
- StrNStr(log.start(), prop1_getter_record.start(), log.length()));
+ CHECK(StrNStr(log.start(), prop1_getter_record.start(), log.length()));
EmbeddedVector<char, 100> prop1_setter_record;
i::SNPrintF(prop1_setter_record,
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"set prop1\"",
reinterpret_cast<intptr_t>(Prop1Setter));
- CHECK_NE(NULL,
- StrNStr(log.start(), prop1_setter_record.start(), log.length()));
+ CHECK(StrNStr(log.start(), prop1_setter_record.start(), log.length()));
EmbeddedVector<char, 100> prop2_getter_record;
i::SNPrintF(prop2_getter_record,
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop2\"",
reinterpret_cast<intptr_t>(Prop2Getter));
- CHECK_NE(NULL,
- StrNStr(log.start(), prop2_getter_record.start(), log.length()));
+ CHECK(StrNStr(log.start(), prop2_getter_record.start(), log.length()));
log.Dispose();
}
isolate->Dispose();
@@ -498,7 +495,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
if (!result->IsTrue()) {
v8::Local<v8::String> s = result->ToString(isolate);
i::ScopedVector<char> data(s->Utf8Length() + 1);
- CHECK_NE(NULL, data.start());
+ CHECK(data.start());
s->WriteUtf8(data.start());
printf("%s\n", data.start());
// Make sure that our output is written prior crash due to CHECK failure.
@@ -523,7 +520,7 @@ TEST(LogVersion) {
i::SNPrintF(ref_data, "v8-version,%d,%d,%d,%d,%d", i::Version::GetMajor(),
i::Version::GetMinor(), i::Version::GetBuild(),
i::Version::GetPatch(), i::Version::IsCandidate());
- CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
+ CHECK(StrNStr(log.start(), ref_data.start(), log.length()));
log.Dispose();
}
isolate->Dispose();
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 3ca02662fa..b655fc80f9 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -221,7 +221,7 @@ TEST(LoadAndStoreWithRepresentation) {
// Call the function from C++.
F5 f = FUNCTION_CAST<F5>(code->entry());
- CHECK_EQ(0, CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ CHECK(!CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
}
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 6cb00e4456..3a97d2137f 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -150,8 +150,7 @@ static void TestNaN(const char *code) {
double value = a->get_scalar(0);
CHECK(std::isnan(value) &&
bit_cast<uint64_t>(value) ==
- bit_cast<uint64_t>(
- i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN()));
}
diff --git a/deps/v8/test/cctest/test-migrations.cc b/deps/v8/test/cctest/test-migrations.cc
new file mode 100644
index 0000000000..2f7ff8703c
--- /dev/null
+++ b/deps/v8/test/cctest/test-migrations.cc
@@ -0,0 +1,2032 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+#include <utility>
+
+#include "src/v8.h"
+
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/execution.h"
+#include "src/factory.h"
+#include "src/global-handles.h"
+#include "src/ic/stub-cache.h"
+#include "src/macro-assembler.h"
+#include "src/smart-pointers.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+// TODO(ishell): fix this once ReconfigureProperty supports "non equivalent"
+// transitions.
+const bool IS_NON_EQUIVALENT_TRANSITION_SUPPORTED = false;
+
+
+// TODO(ishell): fix this once TransitionToPrototype stops generalizing
+// all field representations (similar to crbug/448711 where elements kind
+// and observed transitions caused generalization of all field representations).
+const bool IS_PROTO_TRANS_ISSUE_FIXED = false;
+
+
+// TODO(ishell): fix this once TransitionToAccessorProperty is able to always
+// keep map in fast mode.
+const bool IS_ACCESSOR_FIELD_SUPPORTED = false;
+
+
+// Number of properties used in the tests.
+const int kPropCount = 7;
+
+
+//
+// Helper functions.
+//
+
+static Handle<String> MakeString(const char* str) {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ return factory->InternalizeUtf8String(str);
+}
+
+
+static Handle<String> MakeName(const char* str, int suffix) {
+ EmbeddedVector<char, 128> buffer;
+ SNPrintF(buffer, "%s%d", str, suffix);
+ return MakeString(buffer.start());
+}
+
+
+static Handle<AccessorPair> CreateAccessorPair(bool with_getter,
+ bool with_setter) {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Handle<AccessorPair> pair = factory->NewAccessorPair();
+ Handle<String> empty_string = factory->empty_string();
+ if (with_getter) {
+ Handle<JSFunction> func = factory->NewFunction(empty_string);
+ pair->set_getter(*func);
+ }
+ if (with_setter) {
+ Handle<JSFunction> func = factory->NewFunction(empty_string);
+ pair->set_setter(*func);
+ }
+ return pair;
+}
+
+
+static bool EqualDetails(DescriptorArray* descriptors, int descriptor,
+ PropertyType type, PropertyAttributes attributes,
+ Representation representation, int field_index = -1) {
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ if (details.type() != type) return false;
+ if (details.attributes() != attributes) return false;
+ if (!details.representation().Equals(representation)) return false;
+ if (field_index >= 0 && details.field_index() != field_index) return false;
+ return true;
+}
+
+
+class Expectations {
+ static const int MAX_PROPERTIES = 10;
+ Isolate* isolate_;
+ PropertyType types_[MAX_PROPERTIES];
+ PropertyAttributes attributes_[MAX_PROPERTIES];
+ Representation representations_[MAX_PROPERTIES];
+ // HeapType for kField, value for DATA_CONSTANT and getter for
+ // ACCESSOR_CONSTANT.
+ Handle<Object> values_[MAX_PROPERTIES];
+ // Setter for ACCESSOR_CONSTANT.
+ Handle<Object> setter_values_[MAX_PROPERTIES];
+ int number_of_properties_;
+
+ public:
+ explicit Expectations(Isolate* isolate)
+ : isolate_(isolate), number_of_properties_(0) {}
+
+ void Init(int index, PropertyType type, PropertyAttributes attributes,
+ Representation representation, Handle<Object> value) {
+ DCHECK(index < MAX_PROPERTIES);
+ types_[index] = type;
+ attributes_[index] = attributes;
+ representations_[index] = representation;
+ values_[index] = value;
+ }
+
+ void Print() const {
+ OFStream os(stdout);
+ os << "Expectations: #" << number_of_properties_ << "\n";
+ for (int i = 0; i < number_of_properties_; i++) {
+ os << " " << i << ": ";
+ os << "Descriptor @ ";
+ if (types_[i] == ACCESSOR_CONSTANT) {
+ os << "(get: " << Brief(*values_[i])
+ << ", set: " << Brief(*setter_values_[i]) << ") ";
+ } else {
+ os << Brief(*values_[i]);
+ }
+ os << " (";
+ switch (types_[i]) {
+ case DATA_CONSTANT:
+ os << "immutable ";
+ // Fall through.
+ case DATA:
+ os << "data";
+ break;
+
+ case ACCESSOR_CONSTANT:
+ os << "immutable ";
+ // Fall through.
+ case ACCESSOR:
+ os << "accessor";
+ break;
+ }
+ os << ": " << representations_[i].Mnemonic();
+ os << ", attrs: " << attributes_[i] << ")\n";
+ }
+ }
+
+ Handle<HeapType> GetFieldType(int index) {
+ CHECK(index < MAX_PROPERTIES);
+ CHECK(types_[index] == DATA || types_[index] == ACCESSOR);
+ return Handle<HeapType>::cast(values_[index]);
+ }
+
+ void SetDataField(int index, PropertyAttributes attrs,
+ Representation representation, Handle<HeapType> value) {
+ Init(index, DATA, attrs, representation, value);
+ }
+
+ void SetDataField(int index, Representation representation,
+ Handle<HeapType> value) {
+ SetDataField(index, attributes_[index], representation, value);
+ }
+
+ void SetAccessorField(int index, PropertyAttributes attrs) {
+ Init(index, ACCESSOR, attrs, Representation::Tagged(),
+ HeapType::Any(isolate_));
+ }
+
+ void SetAccessorField(int index) {
+ SetAccessorField(index, attributes_[index]);
+ }
+
+ void SetDataConstant(int index, PropertyAttributes attrs,
+ Handle<JSFunction> value) {
+ Init(index, DATA_CONSTANT, attrs, Representation::HeapObject(), value);
+ }
+
+ void SetDataConstant(int index, Handle<JSFunction> value) {
+ SetDataConstant(index, attributes_[index], value);
+ }
+
+ void SetAccessorConstant(int index, PropertyAttributes attrs,
+ Handle<Object> getter, Handle<Object> setter) {
+ Init(index, ACCESSOR_CONSTANT, attrs, Representation::Tagged(), getter);
+ setter_values_[index] = setter;
+ }
+
+ void SetAccessorConstantComponent(int index, PropertyAttributes attrs,
+ AccessorComponent component,
+ Handle<Object> accessor) {
+ CHECK_EQ(ACCESSOR_CONSTANT, types_[index]);
+ CHECK(index < number_of_properties_);
+ if (component == ACCESSOR_GETTER) {
+ values_[index] = accessor;
+ } else {
+ setter_values_[index] = accessor;
+ }
+ }
+
+ void SetAccessorConstant(int index, PropertyAttributes attrs,
+ Handle<AccessorPair> pair) {
+ Handle<Object> getter = handle(pair->getter(), isolate_);
+ Handle<Object> setter = handle(pair->setter(), isolate_);
+ SetAccessorConstant(index, attrs, getter, setter);
+ }
+
+ void SetAccessorConstant(int index, Handle<Object> getter,
+ Handle<Object> setter) {
+ SetAccessorConstant(index, attributes_[index], getter, setter);
+ }
+
+ void SetAccessorConstant(int index, Handle<AccessorPair> pair) {
+ Handle<Object> getter = handle(pair->getter(), isolate_);
+ Handle<Object> setter = handle(pair->setter(), isolate_);
+ SetAccessorConstant(index, getter, setter);
+ }
+
+ void GeneralizeRepresentation(int index) {
+ CHECK(index < number_of_properties_);
+ representations_[index] = Representation::Tagged();
+ if (types_[index] == DATA || types_[index] == ACCESSOR) {
+ values_[index] = HeapType::Any(isolate_);
+ }
+ }
+
+
+ bool Check(DescriptorArray* descriptors, int descriptor) const {
+ PropertyType type = types_[descriptor];
+ if (!EqualDetails(descriptors, descriptor, type, attributes_[descriptor],
+ representations_[descriptor])) {
+ return false;
+ }
+ Object* expected_value = *values_[descriptor];
+ Object* value = descriptors->GetValue(descriptor);
+ switch (type) {
+ case DATA:
+ case ACCESSOR:
+ return HeapType::cast(expected_value)->Equals(HeapType::cast(value));
+
+ case DATA_CONSTANT:
+ return value == expected_value;
+
+ case ACCESSOR_CONSTANT: {
+ if (value == expected_value) return true;
+ if (!value->IsAccessorPair()) return false;
+ AccessorPair* pair = AccessorPair::cast(value);
+ return pair->Equals(expected_value, *setter_values_[descriptor]);
+ }
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ bool Check(Map* map, int expected_nof) const {
+ CHECK(number_of_properties_ <= MAX_PROPERTIES);
+ CHECK_EQ(expected_nof, map->NumberOfOwnDescriptors());
+ CHECK(!map->is_dictionary_map());
+
+ DescriptorArray* descriptors = map->instance_descriptors();
+ CHECK(expected_nof <= number_of_properties_);
+ for (int i = 0; i < expected_nof; i++) {
+ if (!Check(descriptors, i)) {
+ Print();
+ Check(descriptors, i);
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool Check(Map* map) const { return Check(map, number_of_properties_); }
+
+
+ //
+ // Helper methods for initializing expectations and adding properties to
+ // given |map|.
+ //
+
+ Handle<Map> AddDataField(Handle<Map> map, PropertyAttributes attributes,
+ Representation representation,
+ Handle<HeapType> heap_type) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetDataField(property_index, attributes, representation, heap_type);
+
+ Handle<String> name = MakeName("prop", property_index);
+ return Map::CopyWithField(map, name, heap_type, attributes, representation,
+ INSERT_TRANSITION).ToHandleChecked();
+ }
+
+ Handle<Map> AddDataConstant(Handle<Map> map, PropertyAttributes attributes,
+ Handle<JSFunction> value) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetDataConstant(property_index, attributes, value);
+
+ Handle<String> name = MakeName("prop", property_index);
+ return Map::CopyWithConstant(map, name, value, attributes,
+ INSERT_TRANSITION).ToHandleChecked();
+ }
+
+ Handle<Map> TransitionToDataField(Handle<Map> map,
+ PropertyAttributes attributes,
+ Representation representation,
+ Handle<HeapType> heap_type,
+ Handle<Object> value) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetDataField(property_index, attributes, representation, heap_type);
+
+ Handle<String> name = MakeName("prop", property_index);
+ return Map::TransitionToDataProperty(
+ map, name, value, attributes, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ }
+
+ Handle<Map> TransitionToDataConstant(Handle<Map> map,
+ PropertyAttributes attributes,
+ Handle<JSFunction> value) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetDataConstant(property_index, attributes, value);
+
+ Handle<String> name = MakeName("prop", property_index);
+ return Map::TransitionToDataProperty(
+ map, name, value, attributes, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ }
+
+ Handle<Map> FollowDataTransition(Handle<Map> map,
+ PropertyAttributes attributes,
+ Representation representation,
+ Handle<HeapType> heap_type) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetDataField(property_index, attributes, representation, heap_type);
+
+ Handle<String> name = MakeName("prop", property_index);
+ int t = map->SearchTransition(kData, *name, attributes);
+ CHECK_NE(TransitionArray::kNotFound, t);
+ return handle(map->GetTransition(t));
+ }
+
+ Handle<Map> AddAccessorConstant(Handle<Map> map,
+ PropertyAttributes attributes,
+ Handle<AccessorPair> pair) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetAccessorConstant(property_index, attributes, pair);
+
+ Handle<String> name = MakeName("prop", property_index);
+
+ AccessorConstantDescriptor new_desc(name, pair, attributes);
+ return Map::CopyInsertDescriptor(map, &new_desc, INSERT_TRANSITION);
+ }
+
+ Handle<Map> AddAccessorConstant(Handle<Map> map,
+ PropertyAttributes attributes,
+ Handle<Object> getter,
+ Handle<Object> setter) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetAccessorConstant(property_index, attributes, getter, setter);
+
+ Handle<String> name = MakeName("prop", property_index);
+
+ CHECK(!getter->IsNull() || !setter->IsNull());
+ Factory* factory = isolate_->factory();
+
+ if (!getter->IsNull()) {
+ Handle<AccessorPair> pair = factory->NewAccessorPair();
+ pair->SetComponents(*getter, *factory->null_value());
+ AccessorConstantDescriptor new_desc(name, pair, attributes);
+ map = Map::CopyInsertDescriptor(map, &new_desc, INSERT_TRANSITION);
+ }
+ if (!setter->IsNull()) {
+ Handle<AccessorPair> pair = factory->NewAccessorPair();
+ pair->SetComponents(*getter, *setter);
+ AccessorConstantDescriptor new_desc(name, pair, attributes);
+ map = Map::CopyInsertDescriptor(map, &new_desc, INSERT_TRANSITION);
+ }
+ return map;
+ }
+
+ Handle<Map> TransitionToAccessorConstant(Handle<Map> map,
+ PropertyAttributes attributes,
+ Handle<AccessorPair> pair) {
+ CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
+ int property_index = number_of_properties_++;
+ SetAccessorConstant(property_index, attributes, pair);
+
+ Handle<String> name = MakeName("prop", property_index);
+
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<Object> getter(pair->getter(), isolate);
+ Handle<Object> setter(pair->setter(), isolate);
+
+ map = Map::TransitionToAccessorProperty(map, name, ACCESSOR_GETTER, getter,
+ attributes);
+ CHECK(!map->is_deprecated());
+ CHECK(!map->is_dictionary_map());
+
+ map = Map::TransitionToAccessorProperty(map, name, ACCESSOR_SETTER, setter,
+ attributes);
+ CHECK(!map->is_deprecated());
+ CHECK(!map->is_dictionary_map());
+ return map;
+ }
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+// A set of tests for property reconfiguration that makes new transition tree
+// branch.
+//
+
+TEST(ReconfigureAccessorToNonExistingDataField) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> none_type = HeapType::None(isolate);
+ Handle<AccessorPair> pair = CreateAccessorPair(true, true);
+
+ Expectations expectations(isolate);
+
+ // Create a map, add required properties to it and initialize expectations.
+ Handle<Map> initial_map = Map::Create(isolate, 0);
+ Handle<Map> map = initial_map;
+ map = expectations.AddAccessorConstant(map, NONE, pair);
+
+ CHECK(!map->is_deprecated());
+ CHECK(map->is_stable());
+ CHECK(expectations.Check(*map));
+
+ Handle<Map> new_map = Map::ReconfigureProperty(
+ map, 0, kData, NONE, Representation::None(), none_type, FORCE_FIELD);
+ // |map| did not change.
+ CHECK(!map->is_deprecated());
+ CHECK(map->is_stable());
+ CHECK(expectations.Check(*map));
+
+ expectations.SetDataField(0, NONE, Representation::None(), none_type);
+
+ CHECK(!new_map->is_deprecated());
+ CHECK(new_map->is_stable());
+ CHECK(expectations.Check(*new_map));
+
+ Handle<Map> new_map2 = Map::ReconfigureProperty(
+ map, 0, kData, NONE, Representation::None(), none_type, FORCE_FIELD);
+ CHECK_EQ(*new_map, *new_map2);
+
+ Handle<Object> value(Smi::FromInt(0), isolate);
+ Handle<Map> prepared_map = Map::PrepareForDataProperty(new_map, 0, value);
+ // None to Smi generalization is trivial, map does not change.
+ CHECK_EQ(*new_map, *prepared_map);
+
+ expectations.SetDataField(0, NONE, Representation::Smi(), any_type);
+ CHECK(prepared_map->is_stable());
+ CHECK(expectations.Check(*prepared_map));
+
+ // Now create an object with |map|, migrate it to |prepared_map| and ensure
+ // that the data property is uninitialized.
+ Factory* factory = isolate->factory();
+ Handle<JSObject> obj = factory->NewJSObjectFromMap(map);
+ JSObject::MigrateToMap(obj, prepared_map);
+ FieldIndex index = FieldIndex::ForDescriptor(*prepared_map, 0);
+ CHECK(obj->RawFastPropertyAt(index)->IsUninitialized());
+#ifdef VERIFY_HEAP
+ obj->ObjectVerify();
+#endif
+}
+
+
+// This test checks that the LookupIterator machinery involved in
+// JSObject::SetOwnPropertyIgnoreAttributes() does not try to migrate object
+// to a map with a property with None representation.
+TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function getter() { return 1; };"
+ "function setter() {};"
+ "var o = {};"
+ "Object.defineProperty(o, 'foo', "
+ " { get: getter, set: setter, "
+ " configurable: true, enumerable: true});");
+
+ Handle<String> foo_str = factory->InternalizeUtf8String("foo");
+ Handle<String> obj_name = factory->InternalizeUtf8String("o");
+
+ Handle<Object> obj_value =
+ Object::GetProperty(isolate->global_object(), obj_name).ToHandleChecked();
+ CHECK(obj_value->IsJSObject());
+ Handle<JSObject> obj = Handle<JSObject>::cast(obj_value);
+
+ CHECK_EQ(1, obj->map()->NumberOfOwnDescriptors());
+ CHECK(obj->map()->instance_descriptors()->GetValue(0)->IsAccessorPair());
+
+ Handle<Object> value(Smi::FromInt(42), isolate);
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ obj, foo_str, value, NONE, JSObject::DONT_FORCE_FIELD).ToHandleChecked();
+
+ // Check that the property contains |value|.
+ CHECK_EQ(1, obj->map()->NumberOfOwnDescriptors());
+ FieldIndex index = FieldIndex::ForDescriptor(obj->map(), 0);
+ Object* the_value = obj->RawFastPropertyAt(index);
+ CHECK(the_value->IsSmi());
+ CHECK_EQ(42, Smi::cast(the_value)->value());
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// A set of tests for representation generalization case.
+//
+
+static void TestGeneralizeRepresentation(Representation from_representation,
+ Handle<HeapType> from_type,
+ Representation to_representation,
+ Handle<HeapType> to_type,
+ Representation expected_representation,
+ Handle<HeapType> expected_type) {
+ Isolate* isolate = CcTest::i_isolate();
+
+ Expectations expectations(isolate);
+
+ // Create a map, add required properties to it and initialize expectations.
+ Handle<Map> initial_map = Map::Create(isolate, 0);
+ Handle<Map> map = initial_map;
+ for (int i = 0; i < kPropCount; i++) {
+ map = expectations.AddDataField(map, NONE, from_representation, from_type);
+ }
+ CHECK(!map->is_deprecated());
+ CHECK(map->is_stable());
+ CHECK(expectations.Check(*map));
+
+ Zone zone;
+ FakeStubForTesting stub(isolate);
+
+ // Create new maps by generalizing representation of propX field.
+ Handle<Map> maps[kPropCount];
+ for (int i = 0; i < kPropCount; i++) {
+ Handle<Map> field_owner(map->FindFieldOwner(i), isolate);
+ CompilationInfo info(&stub, isolate, &zone);
+ CHECK(!info.HasAbortedDueToDependencyChange());
+
+ Map::AddDependentCompilationInfo(field_owner,
+ DependentCode::kFieldTypeGroup, &info);
+
+ Handle<Map> new_map = Map::ReconfigureProperty(
+ map, i, kData, NONE, to_representation, to_type, FORCE_FIELD);
+ maps[i] = new_map;
+
+ expectations.SetDataField(i, expected_representation, expected_type);
+
+ CHECK(map->is_deprecated());
+ CHECK(!info.HasAbortedDueToDependencyChange());
+ info.RollbackDependencies(); // Properly cleanup compilation info.
+
+ CHECK_NE(*map, *new_map);
+ CHECK(i == 0 || maps[i - 1]->is_deprecated());
+
+ CHECK(!new_map->is_deprecated());
+ CHECK(!new_map->is_dictionary_map());
+ CHECK(expectations.Check(*new_map));
+ }
+
+ Handle<Map> active_map = maps[kPropCount - 1];
+ CHECK(!active_map->is_deprecated());
+
+ // Update all deprecated maps and check that they are now the same.
+ Handle<Map> updated_map = Map::Update(map);
+ CHECK_EQ(*active_map, *updated_map);
+ for (int i = 0; i < kPropCount; i++) {
+ updated_map = Map::Update(maps[i]);
+ CHECK_EQ(*active_map, *updated_map);
+ }
+}
+
+
+static void TestGeneralizeRepresentationTrivial(
+ Representation from_representation, Handle<HeapType> from_type,
+ Representation to_representation, Handle<HeapType> to_type,
+ Representation expected_representation, Handle<HeapType> expected_type,
+ bool expected_field_type_dependency = true) {
+ Isolate* isolate = CcTest::i_isolate();
+
+ Expectations expectations(isolate);
+
+ // Create a map, add required properties to it and initialize expectations.
+ Handle<Map> initial_map = Map::Create(isolate, 0);
+ Handle<Map> map = initial_map;
+ for (int i = 0; i < kPropCount; i++) {
+ map = expectations.AddDataField(map, NONE, from_representation, from_type);
+ }
+ CHECK(!map->is_deprecated());
+ CHECK(map->is_stable());
+ CHECK(expectations.Check(*map));
+
+ Zone zone;
+ FakeStubForTesting stub(isolate);
+
+ // Create new maps by generalizing representation of propX field.
+ for (int i = 0; i < kPropCount; i++) {
+ Handle<Map> field_owner(map->FindFieldOwner(i), isolate);
+ CompilationInfo info(&stub, isolate, &zone);
+ CHECK(!info.HasAbortedDueToDependencyChange());
+
+ Map::AddDependentCompilationInfo(field_owner,
+ DependentCode::kFieldTypeGroup, &info);
+
+ Handle<Map> new_map = Map::ReconfigureProperty(
+ map, i, kData, NONE, to_representation, to_type, FORCE_FIELD);
+
+ expectations.SetDataField(i, expected_representation, expected_type);
+
+ CHECK_EQ(*map, *new_map);
+ CHECK_EQ(expected_field_type_dependency,
+ info.HasAbortedDueToDependencyChange());
+
+ info.RollbackDependencies(); // Properly cleanup compilation info.
+
+ CHECK_EQ(*map, *new_map);
+ CHECK(!new_map->is_deprecated());
+ CHECK(!new_map->is_dictionary_map());
+ CHECK(expectations.Check(*new_map));
+ }
+
+ Handle<Map> updated_map = Map::Update(map);
+ CHECK_EQ(*map, *updated_map);
+}
+
+
+TEST(GeneralizeRepresentationSmiToDouble) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ TestGeneralizeRepresentation(Representation::Smi(), any_type,
+ Representation::Double(), any_type,
+ Representation::Double(), any_type);
+}
+
+
+TEST(GeneralizeRepresentationSmiToTagged) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> value_type =
+ HeapType::Class(Map::Create(isolate, 0), isolate);
+
+ TestGeneralizeRepresentation(Representation::Smi(), any_type,
+ Representation::HeapObject(), value_type,
+ Representation::Tagged(), any_type);
+}
+
+
+TEST(GeneralizeRepresentationDoubleToTagged) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> value_type =
+ HeapType::Class(Map::Create(isolate, 0), isolate);
+
+ TestGeneralizeRepresentation(Representation::Double(), any_type,
+ Representation::HeapObject(), value_type,
+ Representation::Tagged(), any_type);
+}
+
+
+// HeapObject generalized with Smi must yield Tagged with field type Any.
+TEST(GeneralizeRepresentationHeapObjectToTagged) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+  Handle<HeapType> value_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  TestGeneralizeRepresentation(Representation::HeapObject(), value_type,
+                               Representation::Smi(), any_type,
+                               Representation::Tagged(), any_type);
+}
+
+
+// HeapObject + HeapObject keeps the HeapObject representation; only the
+// field type widens. With kMaxClassesPerFieldType == 1 the very first
+// generalization already widens the class type to Any.
+TEST(GeneralizeRepresentationHeapObjectToHeapObject) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  const int kMaxClassesPerFieldType = 1;
+  Handle<HeapType> current_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  for (int i = 0; i < kMaxClassesPerFieldType; i++) {
+    Handle<HeapType> new_type =
+        HeapType::Class(Map::Create(isolate, 0), isolate);
+
+    // Below the limit the expected type is the union of the classes seen so
+    // far; once the limit is reached it degrades to Any.
+    Handle<HeapType> expected_type =
+        (i < kMaxClassesPerFieldType - 1)
+            ? HeapType::Union(current_type, new_type, isolate)
+            : any_type;
+
+    TestGeneralizeRepresentationTrivial(
+        Representation::HeapObject(), current_type,
+        Representation::HeapObject(), new_type, Representation::HeapObject(),
+        expected_type);
+    current_type = expected_type;
+  }
+
+  Handle<HeapType> new_type = HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  // Any + Class == Any: no field type dependency should be invalidated,
+  // hence the trailing |false| for |expected_field_type_dependency|.
+  TestGeneralizeRepresentationTrivial(
+      Representation::HeapObject(), any_type, Representation::HeapObject(),
+      new_type, Representation::HeapObject(), any_type, false);
+}
+
+
+TEST(GeneralizeRepresentationNoneToSmi) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> none_type = HeapType::None(isolate);
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  // None -> Smi representation change is trivial: the same map is expected
+  // back (see TestGeneralizeRepresentationTrivial).
+  TestGeneralizeRepresentationTrivial(Representation::None(), none_type,
+                                      Representation::Smi(), any_type,
+                                      Representation::Smi(), any_type);
+}
+
+
+TEST(GeneralizeRepresentationNoneToDouble) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> none_type = HeapType::None(isolate);
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  // None -> Double representation change is NOT trivial: the old maps are
+  // expected to be deprecated (see TestGeneralizeRepresentation).
+  TestGeneralizeRepresentation(Representation::None(), none_type,
+                               Representation::Double(), any_type,
+                               Representation::Double(), any_type);
+}
+
+
+TEST(GeneralizeRepresentationNoneToHeapObject) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> none_type = HeapType::None(isolate);
+  Handle<HeapType> value_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  // None -> HeapObject representation change is trivial; the class field
+  // type is preserved.
+  TestGeneralizeRepresentationTrivial(Representation::None(), none_type,
+                                      Representation::HeapObject(), value_type,
+                                      Representation::HeapObject(), value_type);
+}
+
+
+TEST(GeneralizeRepresentationNoneToTagged) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> none_type = HeapType::None(isolate);
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  // None -> Tagged representation change is trivial. (The original comment
+  // said "None -> HeapObject", which described the previous test.)
+  TestGeneralizeRepresentationTrivial(Representation::None(), none_type,
+                                      Representation::Tagged(), any_type,
+                                      Representation::Tagged(), any_type);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// A set of tests for representation generalization case with kAccessor
+// properties.
+//
+
+// Checks that data-field generalization works in the presence of a kAccessor
+// property in the middle of the descriptor array: the accessor slot is
+// skipped, all data fields still generalize to Double, and every deprecated
+// map updates to the same active map.
+TEST(GeneralizeRepresentationWithAccessorProperties) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+  Handle<AccessorPair> pair = CreateAccessorPair(true, true);
+
+  const int kAccessorProp = kPropCount / 2;
+  Expectations expectations(isolate);
+
+  // Create a map, add required properties to it and initialize expectations.
+  Handle<Map> initial_map = Map::Create(isolate, 0);
+  Handle<Map> map = initial_map;
+  for (int i = 0; i < kPropCount; i++) {
+    if (i == kAccessorProp) {
+      map = expectations.AddAccessorConstant(map, NONE, pair);
+    } else {
+      map =
+          expectations.AddDataField(map, NONE, Representation::Smi(), any_type);
+    }
+  }
+  CHECK(!map->is_deprecated());
+  CHECK(map->is_stable());
+  CHECK(expectations.Check(*map));
+
+  // Create new maps by generalizing representation of propX field.
+  Handle<Map> maps[kPropCount];
+  for (int i = 0; i < kPropCount; i++) {
+    if (i == kAccessorProp) {
+      // Skip accessor property reconfiguration. NOTE: this relies on
+      // kAccessorProp > 0 so that maps[i - 1] is valid.
+      maps[i] = maps[i - 1];
+      continue;
+    }
+    Handle<Map> new_map = Map::ReconfigureProperty(
+        map, i, kData, NONE, Representation::Double(), any_type, FORCE_FIELD);
+    maps[i] = new_map;
+
+    expectations.SetDataField(i, Representation::Double(), any_type);
+
+    CHECK(map->is_deprecated());
+    CHECK_NE(*map, *new_map);
+    CHECK(i == 0 || maps[i - 1]->is_deprecated());
+
+    CHECK(!new_map->is_deprecated());
+    CHECK(!new_map->is_dictionary_map());
+    CHECK(expectations.Check(*new_map));
+  }
+
+  Handle<Map> active_map = maps[kPropCount - 1];
+  CHECK(!active_map->is_deprecated());
+
+  // Update all deprecated maps and check that they are now the same.
+  Handle<Map> updated_map = Map::Update(map);
+  CHECK_EQ(*active_map, *updated_map);
+  for (int i = 0; i < kPropCount; i++) {
+    updated_map = Map::Update(maps[i]);
+    CHECK_EQ(*active_map, *updated_map);
+  }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// A set of tests for attribute reconfiguration case.
+//
+
+// This test ensures that representation/field type generalization is correctly
+// propagated from one branch of transition tree (|map2|) to another (|map|).
+//
+// + - p2B - p3 - p4: |map2|
+// |
+// {} - p0 - p1 - p2A - p3 - p4: |map|
+//
+// where "p2A" and "p2B" differ only in the attributes.
+//
+static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
+    Representation from_representation, Handle<HeapType> from_type,
+    Representation to_representation, Handle<HeapType> to_type,
+    Representation expected_representation, Handle<HeapType> expected_type) {
+  Isolate* isolate = CcTest::i_isolate();
+
+  Expectations expectations(isolate);
+
+  // Create a map, add required properties to it and initialize expectations.
+  Handle<Map> initial_map = Map::Create(isolate, 0);
+  Handle<Map> map = initial_map;
+  for (int i = 0; i < kPropCount; i++) {
+    map = expectations.AddDataField(map, NONE, from_representation, from_type);
+  }
+  CHECK(!map->is_deprecated());
+  CHECK(map->is_stable());
+  CHECK(expectations.Check(*map));
+
+
+  // Create another branch in transition tree (property at index |kSplitProp|
+  // has different attributes), initialize expectations.
+  const int kSplitProp = kPropCount / 2;
+  Expectations expectations2(isolate);
+
+  Handle<Map> map2 = initial_map;
+  for (int i = 0; i < kSplitProp; i++) {
+    map2 = expectations2.FollowDataTransition(map2, NONE, from_representation,
+                                              from_type);
+  }
+  map2 =
+      expectations2.AddDataField(map2, READ_ONLY, to_representation, to_type);
+
+  for (int i = kSplitProp + 1; i < kPropCount; i++) {
+    map2 = expectations2.AddDataField(map2, NONE, to_representation, to_type);
+  }
+  CHECK(!map2->is_deprecated());
+  CHECK(map2->is_stable());
+  CHECK(expectations2.Check(*map2));
+
+  // Register a fake compilation dependency on the owner of the split field
+  // so we can check below whether the reconfiguration aborts dependent code.
+  Zone zone;
+  FakeStubForTesting stub(isolate);
+  Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
+  CompilationInfo info(&stub, isolate, &zone);
+  CHECK(!info.HasAbortedDueToDependencyChange());
+  Map::AddDependentCompilationInfo(field_owner, DependentCode::kFieldTypeGroup,
+                                   &info);
+
+  // Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
+  // should generalize representations in |map| (the other branch; the
+  // original comment said |map1|, which does not exist in this function).
+  Handle<Map> new_map =
+      Map::ReconfigureExistingProperty(map2, kSplitProp, kData, NONE);
+
+  // |map2| should be left unchanged.
+  CHECK(!map2->is_deprecated());
+  CHECK_NE(*map2, *new_map);
+  CHECK(expectations2.Check(*map2));
+
+  // |map| should be deprecated and |new_map| should match new expectations.
+  for (int i = kSplitProp; i < kPropCount; i++) {
+    expectations.SetDataField(i, expected_representation, expected_type);
+  }
+  CHECK(map->is_deprecated());
+  CHECK(!info.HasAbortedDueToDependencyChange());
+  info.RollbackDependencies();  // Properly cleanup compilation info.
+  CHECK_NE(*map, *new_map);
+
+  CHECK(!new_map->is_deprecated());
+  CHECK(!new_map->is_dictionary_map());
+  CHECK(expectations.Check(*new_map));
+
+  // Update deprecated |map|, it should become |new_map|.
+  Handle<Map> updated_map = Map::Update(map);
+  CHECK_EQ(*new_map, *updated_map);
+}
+
+
+// This test ensures that trivial representation/field type generalization
+// (from HeapObject to HeapObject) is correctly propagated from one branch of
+// transition tree (|map2|) to another (|map|).
+//
+// + - p2B - p3 - p4: |map2|
+// |
+// {} - p0 - p1 - p2A - p3 - p4: |map|
+//
+// where "p2A" and "p2B" differ only in the attributes.
+//
+static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
+    Representation from_representation, Handle<HeapType> from_type,
+    Representation to_representation, Handle<HeapType> to_type,
+    Representation expected_representation, Handle<HeapType> expected_type,
+    bool expected_field_type_dependency = true) {
+  Isolate* isolate = CcTest::i_isolate();
+
+  Expectations expectations(isolate);
+
+  // Create a map, add required properties to it and initialize expectations.
+  Handle<Map> initial_map = Map::Create(isolate, 0);
+  Handle<Map> map = initial_map;
+  for (int i = 0; i < kPropCount; i++) {
+    map = expectations.AddDataField(map, NONE, from_representation, from_type);
+  }
+  CHECK(!map->is_deprecated());
+  CHECK(map->is_stable());
+  CHECK(expectations.Check(*map));
+
+
+  // Create another branch in transition tree (property at index |kSplitProp|
+  // has different attributes), initialize expectations.
+  const int kSplitProp = kPropCount / 2;
+  Expectations expectations2(isolate);
+
+  Handle<Map> map2 = initial_map;
+  for (int i = 0; i < kSplitProp; i++) {
+    map2 = expectations2.FollowDataTransition(map2, NONE, from_representation,
+                                              from_type);
+  }
+  map2 =
+      expectations2.AddDataField(map2, READ_ONLY, to_representation, to_type);
+
+  for (int i = kSplitProp + 1; i < kPropCount; i++) {
+    map2 = expectations2.AddDataField(map2, NONE, to_representation, to_type);
+  }
+  CHECK(!map2->is_deprecated());
+  CHECK(map2->is_stable());
+  CHECK(expectations2.Check(*map2));
+
+  // Register a fake compilation dependency on the owner of the split field;
+  // in the trivial case field type generalization is expected to invalidate
+  // it (unless |expected_field_type_dependency| says otherwise).
+  Zone zone;
+  FakeStubForTesting stub(isolate);
+  Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
+  CompilationInfo info(&stub, isolate, &zone);
+  CHECK(!info.HasAbortedDueToDependencyChange());
+  Map::AddDependentCompilationInfo(field_owner, DependentCode::kFieldTypeGroup,
+                                   &info);
+
+  // Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
+  // should generalize representations in |map| (the other branch; the
+  // original comment said |map1|, which does not exist in this function).
+  Handle<Map> new_map =
+      Map::ReconfigureExistingProperty(map2, kSplitProp, kData, NONE);
+
+  // |map2| should be left unchanged.
+  CHECK(!map2->is_deprecated());
+  CHECK_NE(*map2, *new_map);
+  CHECK(expectations2.Check(*map2));
+
+  // In trivial case |map| should be returned as a result of the property
+  // reconfiguration, respective field types should be generalized and
+  // respective code dependencies should be invalidated. |map| should be NOT
+  // deprecated and it should match new expectations.
+  for (int i = kSplitProp; i < kPropCount; i++) {
+    expectations.SetDataField(i, expected_representation, expected_type);
+  }
+  CHECK(!map->is_deprecated());
+  CHECK_EQ(*map, *new_map);
+  CHECK_EQ(expected_field_type_dependency,
+           info.HasAbortedDueToDependencyChange());
+  info.RollbackDependencies();  // Properly cleanup compilation info.
+
+  CHECK(!new_map->is_deprecated());
+  CHECK(!new_map->is_dictionary_map());
+  CHECK(expectations.Check(*new_map));
+
+  Handle<Map> updated_map = Map::Update(map);
+  CHECK_EQ(*new_map, *updated_map);
+}
+
+
+// Attribute reconfiguration propagating a Smi -> Double generalization
+// across transition-tree branches (non-trivial: old branch gets deprecated).
+TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationSmiToDouble) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
+      Representation::Smi(), any_type, Representation::Double(), any_type,
+      Representation::Double(), any_type);
+}
+
+
+// Attribute reconfiguration generalizing Smi + HeapObject to Tagged/Any.
+TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationSmiToTagged) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+  Handle<HeapType> value_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
+      Representation::Smi(), any_type, Representation::HeapObject(), value_type,
+      Representation::Tagged(), any_type);
+}
+
+
+// Attribute reconfiguration generalizing Double + HeapObject to Tagged/Any.
+TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationDoubleToTagged) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+  Handle<HeapType> value_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
+      Representation::Double(), any_type, Representation::HeapObject(),
+      value_type, Representation::Tagged(), any_type);
+}
+
+
+// Attribute reconfiguration where the representation stays HeapObject and
+// only the class field type widens (trivial generalization).
+TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationHeapObjToHeapObj) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  const int kMaxClassesPerFieldType = 1;
+  Handle<HeapType> current_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  for (int i = 0; i < kMaxClassesPerFieldType; i++) {
+    Handle<HeapType> new_type =
+        HeapType::Class(Map::Create(isolate, 0), isolate);
+
+    // Below the limit the expected type is the union of the classes seen so
+    // far; once the limit is reached it degrades to Any.
+    Handle<HeapType> expected_type =
+        (i < kMaxClassesPerFieldType - 1)
+            ? HeapType::Union(current_type, new_type, isolate)
+            : any_type;
+
+    TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
+        Representation::HeapObject(), current_type,
+        Representation::HeapObject(), new_type, Representation::HeapObject(),
+        expected_type);
+    current_type = expected_type;
+  }
+
+  Handle<HeapType> new_type = HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  // Any + Class == Any: no field type dependency should be invalidated,
+  // hence the trailing |false|.
+  TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
+      Representation::HeapObject(), any_type, Representation::HeapObject(),
+      new_type, Representation::HeapObject(), any_type, false);
+}
+
+
+// Attribute reconfiguration generalizing HeapObject + Smi to Tagged/Any.
+TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationHeapObjectToTagged) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+  Handle<HeapType> value_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
+      Representation::HeapObject(), value_type, Representation::Smi(), any_type,
+      Representation::Tagged(), any_type);
+}
+
+
+// Checks that given |map| is deprecated and that it updates to given |new_map|
+// which in turn should match expectations.
+// Checker policy for TestReconfigureProperty_CustomPropertyAfterTargetMap:
+// asserts that |map| is deprecated and updates to the given |new_map|,
+// which in turn must match the expectations.
+struct CheckDeprecated {
+  void Check(Handle<Map> map, Handle<Map> new_map,
+             const Expectations& expectations) {
+    CHECK(map->is_deprecated());
+    CHECK_NE(*map, *new_map);
+
+    CHECK(!new_map->is_deprecated());
+    CHECK(!new_map->is_dictionary_map());
+    CHECK(expectations.Check(*new_map));
+
+    // Update deprecated |map|, it should become |new_map|.
+    Handle<Map> updated_map = Map::Update(map);
+    CHECK_EQ(*new_map, *updated_map);
+  }
+};
+
+
+// Checks that given |map| is NOT deprecated, equals to given |new_map| and
+// matches expectations.
+// Checker policy: asserts that |map| is NOT deprecated, equals the given
+// |new_map| and matches the expectations.
+struct CheckSameMap {
+  void Check(Handle<Map> map, Handle<Map> new_map,
+             const Expectations& expectations) {
+    CHECK(!map->is_deprecated());
+    CHECK_EQ(*map, *new_map);
+
+    CHECK(!new_map->is_deprecated());
+    CHECK(!new_map->is_dictionary_map());
+    CHECK(expectations.Check(*new_map));
+
+    // |map| is not deprecated here, so updating it must be a no-op that
+    // still yields |new_map| (== |map|).
+    Handle<Map> updated_map = Map::Update(map);
+    CHECK_EQ(*new_map, *updated_map);
+  }
+};
+
+
+// Checks that given |map| is NOT deprecated, and |new_map| is a result of
+// copy-generalize-all-representations.
+// Checker policy: asserts that |map| is NOT deprecated and |new_map| is the
+// result of copy-generalize-all-representations (detached from the
+// transition tree, i.e. no back pointer).
+struct CheckCopyGeneralizeAllRepresentations {
+  // |expectations| is taken by non-const reference because every field is
+  // generalized in place before the final check.
+  void Check(Handle<Map> map, Handle<Map> new_map, Expectations& expectations) {
+    CHECK(!map->is_deprecated());
+    CHECK_NE(*map, *new_map);
+
+    CHECK(new_map->GetBackPointer()->IsUndefined());
+    for (int i = 0; i < kPropCount; i++) {
+      expectations.GeneralizeRepresentation(i);
+    }
+
+    CHECK(!new_map->is_deprecated());
+    CHECK(!new_map->is_dictionary_map());
+    CHECK(expectations.Check(*new_map));
+  }
+};
+
+
+// This test ensures that representation/field type generalization is correctly
+// propagated from one branch of transition tree (|map2|) to another (|map1|).
+//
+// + - p2B - p3 - p4: |map2|
+// |
+// {} - p0 - p1: |map|
+// |
+// + - p2A - p3 - p4: |map1|
+// |
+// + - the property customized by the TestConfig provided
+//
+// where "p2A" and "p2B" differ only in the attributes.
+//
+template <typename TestConfig, typename Checker>
+static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
+    TestConfig& config, Checker& checker) {
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  // Index of the property customized via |config| on both branches.
+  const int kCustomPropIndex = kPropCount - 2;
+  Expectations expectations(isolate);
+
+  const int kSplitProp = 2;
+  CHECK(kSplitProp < kCustomPropIndex);
+
+  const Representation representation = Representation::Smi();
+
+  // Create common part of transition tree.
+  Handle<Map> initial_map = Map::Create(isolate, 0);
+  Handle<Map> map = initial_map;
+  for (int i = 0; i < kSplitProp; i++) {
+    map = expectations.AddDataField(map, NONE, representation, any_type);
+  }
+  CHECK(!map->is_deprecated());
+  CHECK(map->is_stable());
+  CHECK(expectations.Check(*map));
+
+
+  // Create branch to |map1|.
+  Handle<Map> map1 = map;
+  Expectations expectations1 = expectations;
+  for (int i = kSplitProp; i < kCustomPropIndex; i++) {
+    map1 = expectations1.AddDataField(map1, NONE, representation, any_type);
+  }
+  map1 = config.AddPropertyAtBranch(1, expectations1, map1);
+  for (int i = kCustomPropIndex + 1; i < kPropCount; i++) {
+    map1 = expectations1.AddDataField(map1, NONE, representation, any_type);
+  }
+  CHECK(!map1->is_deprecated());
+  CHECK(map1->is_stable());
+  CHECK(expectations1.Check(*map1));
+
+
+  // Create another branch in transition tree (property at index |kSplitProp|
+  // has different attributes), initialize expectations.
+  Handle<Map> map2 = map;
+  Expectations expectations2 = expectations;
+  map2 = expectations2.AddDataField(map2, READ_ONLY, representation, any_type);
+  for (int i = kSplitProp + 1; i < kCustomPropIndex; i++) {
+    map2 = expectations2.AddDataField(map2, NONE, representation, any_type);
+  }
+  map2 = config.AddPropertyAtBranch(2, expectations2, map2);
+  for (int i = kCustomPropIndex + 1; i < kPropCount; i++) {
+    map2 = expectations2.AddDataField(map2, NONE, representation, any_type);
+  }
+  CHECK(!map2->is_deprecated());
+  CHECK(map2->is_stable());
+  CHECK(expectations2.Check(*map2));
+
+
+  // Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
+  // should generalize representations in |map1|.
+  Handle<Map> new_map =
+      Map::ReconfigureExistingProperty(map2, kSplitProp, kData, NONE);
+
+  // |map2| should be left unchanged.
+  CHECK(!map2->is_deprecated());
+  CHECK_NE(*map2, *new_map);
+  CHECK(expectations2.Check(*map2));
+
+  // Let the config adjust expectations for the customized property and
+  // delegate the final verdict (deprecated / same map / copy-generalized)
+  // to the checker policy.
+  config.UpdateExpectations(kCustomPropIndex, expectations1);
+  checker.Check(map1, new_map, expectations1);
+}
+
+
+// Checks that adding the SAME data constant (the same JSFunction) on both
+// transition tree branches keeps the branches compatible: |map1| must NOT
+// be deprecated.
+TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  struct TestConfig {
+    Handle<JSFunction> js_func_;
+    TestConfig() {
+      Isolate* isolate = CcTest::i_isolate();
+      Factory* factory = isolate->factory();
+      js_func_ = factory->NewFunction(factory->empty_string());
+    }
+
+    Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
+                                    Handle<Map> map) {
+      CHECK(branch_id == 1 || branch_id == 2);
+      // Add the same data constant property at both transition tree branches.
+      return expectations.AddDataConstant(map, NONE, js_func_);
+    }
+
+    void UpdateExpectations(int property_index, Expectations& expectations) {
+      // Expectations stay the same.
+    }
+  };
+
+  TestConfig config;
+  // Two branches are "compatible" so the |map1| should NOT be deprecated.
+  CheckSameMap checker;
+  TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+}
+
+
+// Checks that adding DIFFERENT data constants on the two branches is
+// incompatible: the constant degrades to a HeapObject data field and |map1|
+// gets deprecated.
+TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  struct TestConfig {
+    Handle<JSFunction> js_func1_;
+    Handle<JSFunction> js_func2_;
+    TestConfig() {
+      Isolate* isolate = CcTest::i_isolate();
+      Factory* factory = isolate->factory();
+      js_func1_ = factory->NewFunction(factory->empty_string());
+      js_func2_ = factory->NewFunction(factory->empty_string());
+    }
+
+    Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
+                                    Handle<Map> map) {
+      CHECK(branch_id == 1 || branch_id == 2);
+      // Install a different function constant on each branch.
+      Handle<JSFunction> js_func = branch_id == 1 ? js_func1_ : js_func2_;
+      return expectations.AddDataConstant(map, NONE, js_func);
+    }
+
+    void UpdateExpectations(int property_index, Expectations& expectations) {
+      Isolate* isolate = CcTest::i_isolate();
+      Handle<HeapType> any_type = HeapType::Any(isolate);
+      expectations.SetDataField(property_index, Representation::HeapObject(),
+                                any_type);
+    }
+  };
+
+  TestConfig config;
+  // Two branches are "incompatible" so the |map1| should be deprecated.
+  CheckDeprecated checker;
+  TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+}
+
+
+// Checks that adding the SAME accessor-constant property on both branches
+// keeps the branches compatible: |map1| must NOT be deprecated and the
+// reconfiguration must return the very same map.
+TEST(ReconfigureDataFieldAttribute_SameAccessorConstantAfterTargetMap) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  struct TestConfig {
+    Handle<AccessorPair> pair_;
+    TestConfig() { pair_ = CreateAccessorPair(true, true); }
+
+    Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
+                                    Handle<Map> map) {
+      CHECK(branch_id == 1 || branch_id == 2);
+      // Add the same accessor constant property at both transition tree
+      // branches.
+      return expectations.AddAccessorConstant(map, NONE, pair_);
+    }
+
+    // Declared void for consistency with the other TestConfig structs: the
+    // test template ignores any return value (the original declared this
+    // hook bool and returned an unused value).
+    void UpdateExpectations(int property_index, Expectations& expectations) {
+      // Two branches are "compatible" so expectations stay the same.
+    }
+  };
+
+  TestConfig config;
+  CheckSameMap checker;
+  TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+}
+
+
+// Checks reconfiguration when the two branches hold DIFFERENT accessor
+// constants for the same property: the branches are incompatible and the
+// result is a copy-generalize-all-representations map.
+TEST(ReconfigureDataFieldAttribute_AccConstantToAccFieldAfterTargetMap) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  struct TestConfig {
+    Handle<AccessorPair> pair1_;
+    Handle<AccessorPair> pair2_;
+    TestConfig() {
+      pair1_ = CreateAccessorPair(true, true);
+      pair2_ = CreateAccessorPair(true, true);
+    }
+
+    Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
+                                    Handle<Map> map) {
+      CHECK(branch_id == 1 || branch_id == 2);
+      // Install a different accessor pair on each branch.
+      Handle<AccessorPair> pair = branch_id == 1 ? pair1_ : pair2_;
+      return expectations.AddAccessorConstant(map, NONE, pair);
+    }
+
+    void UpdateExpectations(int property_index, Expectations& expectations) {
+      if (IS_ACCESSOR_FIELD_SUPPORTED) {
+        expectations.SetAccessorField(property_index);
+      } else {
+        // Currently we have a copy-generalize-all-representations case and
+        // ACCESSOR property becomes ACCESSOR_CONSTANT.
+        expectations.SetAccessorConstant(property_index, pair2_);
+      }
+    }
+  };
+
+  TestConfig config;
+  // NOTE(review): the original if/else on IS_ACCESSOR_FIELD_SUPPORTED had
+  // byte-identical branches, so it is collapsed here; revisit the checker
+  // choice once accessor fields are actually supported.
+  CheckCopyGeneralizeAllRepresentations checker;
+  TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// A set of tests checking split map deprecation.
+//
+
+// Checks that when a deprecated map's target branch cannot accept any more
+// transitions (its transition array is full), updating the map produces a
+// "copy-generalized" map detached from the transition tree.
+TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+
+  Expectations expectations(isolate);
+
+  // Create a map, add required properties to it and initialize expectations.
+  Handle<Map> initial_map = Map::Create(isolate, 0);
+  Handle<Map> map = initial_map;
+  for (int i = 0; i < kPropCount; i++) {
+    map = expectations.AddDataField(map, NONE, Representation::Smi(), any_type);
+  }
+  CHECK(!map->is_deprecated());
+  CHECK(map->is_stable());
+
+  // Generalize representation of property at index |kSplitProp|.
+  const int kSplitProp = kPropCount / 2;
+  Handle<Map> split_map;
+  Handle<Map> map2 = initial_map;
+  {
+    // Walk the transition tree from the root to locate the split map and
+    // the map holding property |kSplitProp|.
+    for (int i = 0; i < kSplitProp + 1; i++) {
+      if (i == kSplitProp) {
+        split_map = map2;
+      }
+
+      Handle<String> name = MakeName("prop", i);
+      int t = map2->SearchTransition(kData, *name, NONE);
+      CHECK_NE(TransitionArray::kNotFound, t);
+      map2 = handle(map2->GetTransition(t));
+    }
+
+    map2 = Map::ReconfigureProperty(map2, kSplitProp, kData, NONE,
+                                    Representation::Double(), any_type,
+                                    FORCE_FIELD);
+    expectations.SetDataField(kSplitProp, Representation::Double(), any_type);
+
+    CHECK(expectations.Check(*split_map, kSplitProp));
+    CHECK(expectations.Check(*map2, kSplitProp + 1));
+  }
+
+  // At this point |map| should be deprecated and disconnected from the
+  // transition tree.
+  CHECK(map->is_deprecated());
+  CHECK(!split_map->is_deprecated());
+  CHECK(!map2->is_deprecated());
+
+  // Fill in transition tree of |map2| so that it can't have more transitions.
+  for (int i = 0; i < TransitionArray::kMaxNumberOfTransitions; i++) {
+    CHECK(map2->CanHaveMoreTransitions());
+    Handle<String> name = MakeName("foo", i);
+    Map::CopyWithField(map2, name, any_type, NONE, Representation::Smi(),
+                       INSERT_TRANSITION).ToHandleChecked();
+  }
+  CHECK(!map2->CanHaveMoreTransitions());
+
+  // Try to update |map|, since there is no place for propX transition at |map2|
+  // |map| should become "copy-generalized".
+  Handle<Map> updated_map = Map::Update(map);
+  CHECK(updated_map->GetBackPointer()->IsUndefined());
+
+  // All fields of the copy-generalized map are the most general Tagged/Any.
+  for (int i = 0; i < kPropCount; i++) {
+    expectations.SetDataField(i, Representation::Tagged(), any_type);
+  }
+  CHECK(expectations.Check(*updated_map));
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// A set of tests involving special transitions (such as elements kind
+// transition, observed transition or prototype transition).
+//
+
+// This test ensures that representation/field type generalization is correctly
+// propagated from one branch of transition tree (|map2|) to another (|map|).
+//
+// p4B: |map2|
+// |
+// * - special transition
+// |
+// {} - p0 - p1 - p2A - p3 - p4A: |map|
+//
+// where "p4A" and "p4B" are exactly the same properties.
+//
+// TODO(ishell): unify this test template with
+// TestReconfigureDataFieldAttribute_GeneralizeRepresentation once
+// IS_PROTO_TRANS_ISSUE_FIXED and IS_NON_EQUIVALENT_TRANSITION_SUPPORTED are
+// fixed.
+template <typename TestConfig>
+static void TestGeneralizeRepresentationWithSpecialTransition(
+    TestConfig& config, Representation from_representation,
+    Handle<HeapType> from_type, Representation to_representation,
+    Handle<HeapType> to_type, Representation expected_representation,
+    Handle<HeapType> expected_type) {
+  Isolate* isolate = CcTest::i_isolate();
+
+  Expectations expectations(isolate);
+
+  // Create a map, add required properties to it and initialize expectations.
+  Handle<Map> initial_map = Map::Create(isolate, 0);
+  Handle<Map> map = initial_map;
+  for (int i = 0; i < kPropCount; i++) {
+    map = expectations.AddDataField(map, NONE, from_representation, from_type);
+  }
+  CHECK(!map->is_deprecated());
+  CHECK(map->is_stable());
+  CHECK(expectations.Check(*map));
+
+  // Apply some special transition to |map|.
+  CHECK(map->owns_descriptors());
+  Handle<Map> map2 = config.Transition(map);
+
+  // |map| should still match expectations.
+  CHECK(!map->is_deprecated());
+  CHECK(expectations.Check(*map));
+
+  Expectations expectations2 = expectations;
+  if (config.generalizes_representations()) {
+    for (int i = 0; i < kPropCount; i++) {
+      expectations2.GeneralizeRepresentation(i);
+    }
+  }
+
+  CHECK(!map2->is_deprecated());
+  CHECK(map2->is_stable());
+  CHECK(expectations2.Check(*map2));
+
+  // Create new maps by generalizing representation of propX field.
+  Handle<Map> maps[kPropCount];
+  for (int i = 0; i < kPropCount; i++) {
+    Handle<Map> new_map = Map::ReconfigureProperty(
+        map, i, kData, NONE, to_representation, to_type, FORCE_FIELD);
+    maps[i] = new_map;
+
+    expectations.SetDataField(i, expected_representation, expected_type);
+
+    CHECK(map->is_deprecated());
+    CHECK_NE(*map, *new_map);
+    CHECK(i == 0 || maps[i - 1]->is_deprecated());
+    CHECK(expectations.Check(*new_map));
+
+    Handle<Map> new_map2 = Map::Update(map2);
+    CHECK(!new_map2->is_deprecated());
+    CHECK(!new_map2->is_dictionary_map());
+
+    if (!IS_NON_EQUIVALENT_TRANSITION_SUPPORTED) {
+      // In case of non-equivalent transition currently we generalize all
+      // representations. (Loop variable renamed to |j|: the original
+      // shadowed the outer loop's |i|, which was harmless but confusing.)
+      for (int j = 0; j < kPropCount; j++) {
+        expectations2.GeneralizeRepresentation(j);
+      }
+      CHECK(new_map2->GetBackPointer()->IsUndefined());
+      CHECK(expectations2.Check(*new_map2));
+    } else {
+      CHECK(expectations.Check(*new_map2));
+    }
+  }
+
+  Handle<Map> active_map = maps[kPropCount - 1];
+  CHECK(!active_map->is_deprecated());
+
+  // Update all deprecated maps and check that they are now the same.
+  Handle<Map> updated_map = Map::Update(map);
+  CHECK_EQ(*active_map, *updated_map);
+  for (int i = 0; i < kPropCount; i++) {
+    updated_map = Map::Update(maps[i]);
+    CHECK_EQ(*active_map, *updated_map);
+  }
+}
+
+
+// Elements-kind transition (to DICTIONARY_ELEMENTS) applied to a map that
+// owns its descriptors; generalization must propagate across it.
+TEST(ElementsKindTransitionFromMapOwningDescriptor) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+  Handle<HeapType> value_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  struct TestConfig {
+    Handle<Map> Transition(Handle<Map> map) {
+      return Map::CopyAsElementsKind(map, DICTIONARY_ELEMENTS,
+                                     INSERT_TRANSITION);
+    }
+    // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
+    bool generalizes_representations() const { return false; }
+  };
+  TestConfig config;
+  TestGeneralizeRepresentationWithSpecialTransition(
+      config, Representation::Smi(), any_type, Representation::HeapObject(),
+      value_type, Representation::Tagged(), any_type);
+}
+
+
+// Same as above, but the transitioned map does NOT own its descriptors
+// (ownership is dropped by first adding an extra field transition).
+TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapType> any_type = HeapType::Any(isolate);
+  Handle<HeapType> value_type =
+      HeapType::Class(Map::Create(isolate, 0), isolate);
+
+  struct TestConfig {
+    Handle<Map> Transition(Handle<Map> map) {
+      Isolate* isolate = CcTest::i_isolate();
+      Handle<HeapType> any_type = HeapType::Any(isolate);
+
+      // Add one more transition to |map| in order to prevent descriptors
+      // ownership.
+      CHECK(map->owns_descriptors());
+      Map::CopyWithField(map, MakeString("foo"), any_type, NONE,
+                         Representation::Smi(),
+                         INSERT_TRANSITION).ToHandleChecked();
+      CHECK(!map->owns_descriptors());
+
+      return Map::CopyAsElementsKind(map, DICTIONARY_ELEMENTS,
+                                     INSERT_TRANSITION);
+    }
+    // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
+    bool generalizes_representations() const { return false; }
+  };
+  TestConfig config;
+  TestGeneralizeRepresentationWithSpecialTransition(
+      config, Representation::Smi(), any_type, Representation::HeapObject(),
+      value_type, Representation::Tagged(), any_type);
+}
+
+
+TEST(ForObservedTransitionFromMapOwningDescriptor) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> value_type =
+ HeapType::Class(Map::Create(isolate, 0), isolate);
+
+ struct TestConfig {
+ Handle<Map> Transition(Handle<Map> map) {
+ return Map::CopyForObserved(map);
+ }
+ // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
+ bool generalizes_representations() const { return false; }
+ };
+ TestConfig config;
+ TestGeneralizeRepresentationWithSpecialTransition(
+ config, Representation::Smi(), any_type, Representation::HeapObject(),
+ value_type, Representation::Tagged(), any_type);
+}
+
+
+TEST(ForObservedTransitionFromMapNotOwningDescriptor) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> value_type =
+ HeapType::Class(Map::Create(isolate, 0), isolate);
+
+ struct TestConfig {
+ Handle<Map> Transition(Handle<Map> map) {
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ // Add one more transition to |map| in order to prevent descriptors
+ // ownership.
+ CHECK(map->owns_descriptors());
+ Map::CopyWithField(map, MakeString("foo"), any_type, NONE,
+ Representation::Smi(),
+ INSERT_TRANSITION).ToHandleChecked();
+ CHECK(!map->owns_descriptors());
+
+ return Map::CopyForObserved(map);
+ }
+ // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
+ bool generalizes_representations() const { return false; }
+ };
+ TestConfig config;
+ TestGeneralizeRepresentationWithSpecialTransition(
+ config, Representation::Smi(), any_type, Representation::HeapObject(),
+ value_type, Representation::Tagged(), any_type);
+}
+
+
+TEST(PrototypeTransitionFromMapOwningDescriptor) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> value_type =
+ HeapType::Class(Map::Create(isolate, 0), isolate);
+
+ struct TestConfig {
+ Handle<JSObject> prototype_;
+
+ TestConfig() {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
+ }
+
+ Handle<Map> Transition(Handle<Map> map) {
+ return Map::TransitionToPrototype(map, prototype_, REGULAR_PROTOTYPE);
+ }
+ // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
+ bool generalizes_representations() const {
+ return !IS_PROTO_TRANS_ISSUE_FIXED;
+ }
+ };
+ TestConfig config;
+ TestGeneralizeRepresentationWithSpecialTransition(
+ config, Representation::Smi(), any_type, Representation::HeapObject(),
+ value_type, Representation::Tagged(), any_type);
+}
+
+
+TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> value_type =
+ HeapType::Class(Map::Create(isolate, 0), isolate);
+
+ struct TestConfig {
+ Handle<JSObject> prototype_;
+
+ TestConfig() {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
+ }
+
+ Handle<Map> Transition(Handle<Map> map) {
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ // Add one more transition to |map| in order to prevent descriptors
+ // ownership.
+ CHECK(map->owns_descriptors());
+ Map::CopyWithField(map, MakeString("foo"), any_type, NONE,
+ Representation::Smi(),
+ INSERT_TRANSITION).ToHandleChecked();
+ CHECK(!map->owns_descriptors());
+
+ return Map::TransitionToPrototype(map, prototype_, REGULAR_PROTOTYPE);
+ }
+ // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
+ bool generalizes_representations() const {
+ return !IS_PROTO_TRANS_ISSUE_FIXED;
+ }
+ };
+ TestConfig config;
+ TestGeneralizeRepresentationWithSpecialTransition(
+ config, Representation::Smi(), any_type, Representation::HeapObject(),
+ value_type, Representation::Tagged(), any_type);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// A set of tests for higher level transitioning mechanics.
+//
+
+struct TransitionToDataFieldOperator {
+ Representation representation_;
+ PropertyAttributes attributes_;
+ Handle<HeapType> heap_type_;
+ Handle<Object> value_;
+
+ TransitionToDataFieldOperator(Representation representation,
+ Handle<HeapType> heap_type,
+ Handle<Object> value,
+ PropertyAttributes attributes = NONE)
+ : representation_(representation),
+ attributes_(attributes),
+ heap_type_(heap_type),
+ value_(value) {}
+
+ Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
+ return expectations.TransitionToDataField(map, attributes_, representation_,
+ heap_type_, value_);
+ }
+};
+
+
+struct TransitionToDataConstantOperator {
+ PropertyAttributes attributes_;
+ Handle<JSFunction> value_;
+
+ TransitionToDataConstantOperator(Handle<JSFunction> value,
+ PropertyAttributes attributes = NONE)
+ : attributes_(attributes), value_(value) {}
+
+ Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
+ return expectations.TransitionToDataConstant(map, attributes_, value_);
+ }
+};
+
+
+struct TransitionToAccessorConstantOperator {
+ PropertyAttributes attributes_;
+ Handle<AccessorPair> pair_;
+
+ TransitionToAccessorConstantOperator(Handle<AccessorPair> pair,
+ PropertyAttributes attributes = NONE)
+ : attributes_(attributes), pair_(pair) {}
+
+ Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
+ return expectations.TransitionToAccessorConstant(map, attributes_, pair_);
+ }
+};
+
+
+struct ReconfigureAsDataPropertyOperator {
+ int descriptor_;
+ Representation representation_;
+ PropertyAttributes attributes_;
+ Handle<HeapType> heap_type_;
+
+ ReconfigureAsDataPropertyOperator(int descriptor,
+ Representation representation,
+ Handle<HeapType> heap_type,
+ PropertyAttributes attributes = NONE)
+ : descriptor_(descriptor),
+ representation_(representation),
+ attributes_(attributes),
+ heap_type_(heap_type) {}
+
+ Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
+ expectations.SetDataField(descriptor_, representation_, heap_type_);
+ return Map::ReconfigureExistingProperty(map, descriptor_, kData,
+ attributes_);
+ }
+};
+
+
+struct ReconfigureAsAccessorPropertyOperator {
+ int descriptor_;
+ PropertyAttributes attributes_;
+
+ ReconfigureAsAccessorPropertyOperator(int descriptor,
+ PropertyAttributes attributes = NONE)
+ : descriptor_(descriptor), attributes_(attributes) {}
+
+ Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
+ expectations.SetAccessorField(descriptor_);
+ return Map::ReconfigureExistingProperty(map, descriptor_, kAccessor,
+ attributes_);
+ }
+};
+
+
+// Checks that representation/field type generalization happened.
+struct FieldGeneralizationChecker {
+ int descriptor_;
+ Representation representation_;
+ PropertyAttributes attributes_;
+ Handle<HeapType> heap_type_;
+
+ FieldGeneralizationChecker(int descriptor, Representation representation,
+ Handle<HeapType> heap_type,
+ PropertyAttributes attributes = NONE)
+ : descriptor_(descriptor),
+ representation_(representation),
+ attributes_(attributes),
+ heap_type_(heap_type) {}
+
+ void Check(Expectations& expectations2, Handle<Map> map1, Handle<Map> map2) {
+ CHECK(!map2->is_deprecated());
+
+ CHECK(map1->is_deprecated());
+ CHECK_NE(*map1, *map2);
+ Handle<Map> updated_map = Map::Update(map1);
+ CHECK_EQ(*map2, *updated_map);
+
+ expectations2.SetDataField(descriptor_, representation_, heap_type_);
+ CHECK(expectations2.Check(*map2));
+ }
+};
+
+
+// Checks that existing transition was taken as is.
+struct SameMapChecker {
+ void Check(Expectations& expectations, Handle<Map> map1, Handle<Map> map2) {
+ CHECK(!map2->is_deprecated());
+ CHECK_EQ(*map1, *map2);
+ CHECK(expectations.Check(*map2));
+ }
+};
+
+
+// Checks that both |map1| and |map2| should stays non-deprecated, this is
+// the case when property kind is change.
+struct PropertyKindReconfigurationChecker {
+ void Check(Expectations& expectations, Handle<Map> map1, Handle<Map> map2) {
+ CHECK(!map1->is_deprecated());
+ CHECK(!map2->is_deprecated());
+ CHECK_NE(*map1, *map2);
+ CHECK(expectations.Check(*map2));
+ }
+};
+
+
+// This test transitions to various property types under different
+// circumstances.
+// Plan:
+// 1) create a |map| with p0..p3 properties.
+// 2) create |map1| by adding "p4" to |map0|.
+// 3) create |map2| by transition to "p4" from |map0|.
+//
+// + - p4B: |map2|
+// |
+// {} - p0 - p1 - pA - p3: |map|
+// |
+// + - p4A: |map1|
+//
+// where "p4A" and "p4B" differ only in the attributes.
+//
+template <typename TransitionOp1, typename TransitionOp2, typename Checker>
+static void TestTransitionTo(TransitionOp1& transition_op1,
+ TransitionOp2& transition_op2, Checker& checker) {
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ Expectations expectations(isolate);
+
+ // Create a map, add required properties to it and initialize expectations.
+ Handle<Map> initial_map = Map::Create(isolate, 0);
+ Handle<Map> map = initial_map;
+ for (int i = 0; i < kPropCount - 1; i++) {
+ map = expectations.AddDataField(map, NONE, Representation::Smi(), any_type);
+ }
+ CHECK(expectations.Check(*map));
+
+ Expectations expectations1 = expectations;
+ Handle<Map> map1 = transition_op1.DoTransition(expectations1, map);
+ CHECK(expectations1.Check(*map1));
+
+ Expectations expectations2 = expectations;
+ Handle<Map> map2 = transition_op2.DoTransition(expectations2, map);
+
+ // Let the test customization do the check.
+ checker.Check(expectations2, map1, map2);
+}
+
+
+TEST(TransitionDataFieldToDataField) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ Handle<Object> value1 = handle(Smi::FromInt(0), isolate);
+ TransitionToDataFieldOperator transition_op1(Representation::Smi(), any_type,
+ value1);
+
+ Handle<Object> value2 = isolate->factory()->NewHeapNumber(0);
+ TransitionToDataFieldOperator transition_op2(Representation::Double(),
+ any_type, value2);
+
+ FieldGeneralizationChecker checker(kPropCount - 1, Representation::Double(),
+ any_type);
+ TestTransitionTo(transition_op1, transition_op2, checker);
+}
+
+
+TEST(TransitionDataConstantToSameDataConstant) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+
+ Handle<JSFunction> js_func = factory->NewFunction(factory->empty_string());
+ TransitionToDataConstantOperator transition_op(js_func);
+
+ SameMapChecker checker;
+ TestTransitionTo(transition_op, transition_op, checker);
+}
+
+
+TEST(TransitionDataConstantToAnotherDataConstant) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ Handle<JSFunction> js_func1 = factory->NewFunction(factory->empty_string());
+ TransitionToDataConstantOperator transition_op1(js_func1);
+
+ Handle<JSFunction> js_func2 = factory->NewFunction(factory->empty_string());
+ TransitionToDataConstantOperator transition_op2(js_func2);
+
+ FieldGeneralizationChecker checker(kPropCount - 1,
+ Representation::HeapObject(), any_type);
+ TestTransitionTo(transition_op1, transition_op2, checker);
+}
+
+
+TEST(TransitionDataConstantToDataField) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ Handle<JSFunction> js_func1 = factory->NewFunction(factory->empty_string());
+ TransitionToDataConstantOperator transition_op1(js_func1);
+
+ Handle<Object> value2 = isolate->factory()->NewHeapNumber(0);
+ TransitionToDataFieldOperator transition_op2(Representation::Double(),
+ any_type, value2);
+
+ FieldGeneralizationChecker checker(kPropCount - 1, Representation::Tagged(),
+ any_type);
+ TestTransitionTo(transition_op1, transition_op2, checker);
+}
+
+
+TEST(TransitionAccessorConstantToSameAccessorConstant) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ Handle<AccessorPair> pair = CreateAccessorPair(true, true);
+ TransitionToAccessorConstantOperator transition_op(pair);
+
+ SameMapChecker checker;
+ TestTransitionTo(transition_op, transition_op, checker);
+}
+
+
+// TODO(ishell): add this test once IS_ACCESSOR_FIELD_SUPPORTED is supported.
+// TEST(TransitionAccessorConstantToAnotherAccessorConstant)
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 2766a4f239..64634ad79a 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -328,11 +328,10 @@ TEST(APITestBasicMutation) {
// Setting an indexed element via the property setting method
obj->Set(Number::New(v8_isolate, 1), Number::New(v8_isolate, 5));
// Setting with a non-String, non-uint32 key
- obj->ForceSet(Number::New(v8_isolate, 1.1), Number::New(v8_isolate, 6),
- DontDelete);
+ obj->Set(Number::New(v8_isolate, 1.1), Number::New(v8_isolate, 6));
obj->Delete(String::NewFromUtf8(v8_isolate, "foo"));
obj->Delete(1);
- obj->ForceDelete(Number::New(v8_isolate, 1.1));
+ obj->Delete(Number::New(v8_isolate, 1.1));
// Force delivery
// TODO(adamk): Should the above set methods trigger delivery themselves?
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 08caeab55f..1a17475ada 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -157,7 +157,11 @@ TEST(ScanHTMLEndComments) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::PreParser preparser(&scanner, &log, stack_limit);
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(
+ &zone, CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
+ stack_limit);
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -171,7 +175,11 @@ TEST(ScanHTMLEndComments) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::PreParser preparser(&scanner, &log, stack_limit);
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(
+ &zone, CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
+ stack_limit);
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
@@ -317,7 +325,11 @@ TEST(StandAlonePreParser) {
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::PreParser preparser(&scanner, &log, stack_limit);
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(
+ &zone, CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
+ stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_natives(true);
preparser.set_allow_harmony_arrow_functions(true);
@@ -351,7 +363,11 @@ TEST(StandAlonePreParserNoNatives) {
scanner.Initialize(&stream);
// Preparser defaults to disallowing natives syntax.
- i::PreParser preparser(&scanner, &log, stack_limit);
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(
+ &zone, CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
+ stack_limit);
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -375,7 +391,7 @@ TEST(PreparsingObjectLiterals) {
v8::Local<v8::Value> result = ParserCacheCompileRun(source);
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
- CHECK_EQ("foo", *utf8);
+ CHECK_EQ(0, strcmp("foo", *utf8));
}
{
@@ -383,7 +399,7 @@ TEST(PreparsingObjectLiterals) {
v8::Local<v8::Value> result = ParserCacheCompileRun(source);
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
- CHECK_EQ("foo", *utf8);
+ CHECK_EQ(0, strcmp("foo", *utf8));
}
{
@@ -391,7 +407,7 @@ TEST(PreparsingObjectLiterals) {
v8::Local<v8::Value> result = ParserCacheCompileRun(source);
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
- CHECK_EQ("foo", *utf8);
+ CHECK_EQ(0, strcmp("foo", *utf8));
}
}
@@ -416,7 +432,10 @@ TEST(RegressChromium62639) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::PreParser preparser(&scanner, &log,
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(&zone,
+ CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
CcTest::i_isolate()->stack_guard()->real_climit());
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
@@ -448,7 +467,10 @@ TEST(Regress928) {
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::PreParser preparser(&scanner, &log,
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(&zone,
+ CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
CcTest::i_isolate()->stack_guard()->real_climit());
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
@@ -497,7 +519,11 @@ TEST(PreParseOverflow) {
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- i::PreParser preparser(&scanner, &log, stack_limit);
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(&zone,
+ CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
+ stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_harmony_arrow_functions(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
@@ -696,7 +722,7 @@ TEST(Utf8CharacterStream) {
int i = 0;
while (stream.pos() < kMaxUC16CharU) {
CHECK_EQU(i, stream.pos());
- unsigned progress = stream.SeekForward(12);
+ int progress = static_cast<int>(stream.SeekForward(12));
i += progress;
int32_t c = stream.Advance();
if (i <= kMaxUC16Char) {
@@ -805,7 +831,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
CHECK(scanner.ScanRegExpPattern(start == i::Token::ASSIGN_DIV));
scanner.Next(); // Current token is now the regexp literal.
- i::Zone zone(CcTest::i_isolate());
+ i::Zone zone;
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
ast_value_factory.Internalize(CcTest::i_isolate());
@@ -926,74 +952,70 @@ TEST(ScopeUsesArgumentsSuperThis) {
const char* suffix;
} surroundings[] = {
{ "function f() {", "}" },
- { "var f = () => {", "}" },
+ { "var f = () => {", "};" },
+ { "class C { constructor() {", "} }" },
};
enum Expected {
NONE = 0,
ARGUMENTS = 1,
- SUPER_PROPERTY = 2,
- SUPER_CONSTRUCTOR_CALL = 4,
- THIS = 8,
- INNER_ARGUMENTS = 16,
- INNER_SUPER_PROPERTY = 32,
- INNER_SUPER_CONSTRUCTOR_CALL = 64,
- INNER_THIS = 128
+ SUPER_PROPERTY = 1 << 1,
+ THIS = 1 << 2,
+ INNER_ARGUMENTS = 1 << 3,
+ INNER_SUPER_PROPERTY = 1 << 4,
+ INNER_THIS = 1 << 5
};
static const struct {
const char* body;
int expected;
} source_data[] = {
- {"", NONE},
- {"return this", THIS},
- {"return arguments", ARGUMENTS},
- {"return super()", SUPER_CONSTRUCTOR_CALL},
- {"return super.x", SUPER_PROPERTY},
- {"return arguments[0]", ARGUMENTS},
- {"return this + arguments[0]", ARGUMENTS | THIS},
- {"return this + arguments[0] + super.x",
- ARGUMENTS | SUPER_PROPERTY | THIS},
- {"return x => this + x", INNER_THIS},
- {"return x => super() + x", INNER_SUPER_CONSTRUCTOR_CALL},
- {"this.foo = 42;", THIS},
- {"this.foo();", THIS},
- {"if (foo()) { this.f() }", THIS},
- {"if (foo()) { super.f() }", SUPER_PROPERTY},
- {"if (arguments.length) { this.f() }", ARGUMENTS | THIS},
- {"while (true) { this.f() }", THIS},
- {"while (true) { super.f() }", SUPER_PROPERTY},
- {"if (true) { while (true) this.foo(arguments) }", ARGUMENTS | THIS},
- // Multiple nesting levels must work as well.
- {"while (true) { while (true) { while (true) return this } }", THIS},
- {"while (true) { while (true) { while (true) return super() } }",
- SUPER_CONSTRUCTOR_CALL},
- {"if (1) { return () => { while (true) new this() } }", INNER_THIS},
- {"if (1) { return () => { while (true) new super() } }", NONE},
- {"if (1) { return () => { while (true) new new super() } }", NONE},
- // Note that propagation of the inner_uses_this() value does not
- // cross boundaries of normal functions onto parent scopes.
- {"return function (x) { return this + x }", NONE},
- {"return function (x) { return super() + x }", NONE},
- {"var x = function () { this.foo = 42 };", NONE},
- {"var x = function () { super.foo = 42 };", NONE},
- {"if (1) { return function () { while (true) new this() } }", NONE},
- {"if (1) { return function () { while (true) new super() } }", NONE},
- {"return function (x) { return () => this }", NONE},
- {"return function (x) { return () => super() }", NONE},
- // Flags must be correctly set when using block scoping.
- {"\"use strict\"; while (true) { let x; this, arguments; }",
- INNER_ARGUMENTS | INNER_THIS},
- {"\"use strict\"; while (true) { let x; this, super(), arguments; }",
- INNER_ARGUMENTS | INNER_SUPER_CONSTRUCTOR_CALL | INNER_THIS},
- {"\"use strict\"; if (foo()) { let x; this.f() }", INNER_THIS},
- {"\"use strict\"; if (foo()) { let x; super.f() }",
- INNER_SUPER_PROPERTY},
- {"\"use strict\"; if (1) {"
- " let x; return function () { return this + super() + arguments }"
- "}",
- NONE},
- };
+ {"", NONE},
+ {"return this", THIS},
+ {"return arguments", ARGUMENTS},
+ {"return super.x", SUPER_PROPERTY},
+ {"return arguments[0]", ARGUMENTS},
+ {"return this + arguments[0]", ARGUMENTS | THIS},
+ {"return this + arguments[0] + super.x",
+ ARGUMENTS | SUPER_PROPERTY | THIS},
+ {"return x => this + x", INNER_THIS},
+ {"return x => super.f() + x", INNER_SUPER_PROPERTY},
+ {"this.foo = 42;", THIS},
+ {"this.foo();", THIS},
+ {"if (foo()) { this.f() }", THIS},
+ {"if (foo()) { super.f() }", SUPER_PROPERTY},
+ {"if (arguments.length) { this.f() }", ARGUMENTS | THIS},
+ {"while (true) { this.f() }", THIS},
+ {"while (true) { super.f() }", SUPER_PROPERTY},
+ {"if (true) { while (true) this.foo(arguments) }", ARGUMENTS | THIS},
+ // Multiple nesting levels must work as well.
+ {"while (true) { while (true) { while (true) return this } }", THIS},
+ {"while (true) { while (true) { while (true) return super.f() } }",
+ SUPER_PROPERTY},
+ {"if (1) { return () => { while (true) new this() } }", INNER_THIS},
+ // Note that propagation of the inner_uses_this() value does not
+ // cross boundaries of normal functions onto parent scopes.
+ {"return function (x) { return this + x }", NONE},
+ {"return { m(x) { return super.m() + x } }", NONE},
+ {"var x = function () { this.foo = 42 };", NONE},
+ {"var x = { m() { super.foo = 42 } };", NONE},
+ {"if (1) { return function () { while (true) new this() } }", NONE},
+ {"if (1) { return { m() { while (true) super.m() } } }", NONE},
+ {"return function (x) { return () => this }", NONE},
+ {"return { m(x) { return () => super.m() } }", NONE},
+ // Flags must be correctly set when using block scoping.
+ {"\"use strict\"; while (true) { let x; this, arguments; }",
+ INNER_ARGUMENTS | INNER_THIS},
+ {"\"use strict\"; while (true) { let x; this, super.f(), arguments; }",
+ INNER_ARGUMENTS | INNER_SUPER_PROPERTY | INNER_THIS},
+ {"\"use strict\"; if (foo()) { let x; this.f() }", INNER_THIS},
+ {"\"use strict\"; if (foo()) { let x; super.f() }",
+ INNER_SUPER_PROPERTY},
+ {"\"use strict\"; if (1) {"
+ " let x; return { m() { return this + super.m() + arguments } }"
+ "}",
+ NONE},
+ };
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -1007,6 +1029,12 @@ TEST(ScopeUsesArgumentsSuperThis) {
for (unsigned j = 0; j < arraysize(surroundings); ++j) {
for (unsigned i = 0; i < arraysize(source_data); ++i) {
+ // Super property is only allowed in constructor and method.
+ if (((source_data[i].expected & SUPER_PROPERTY) ||
+ (source_data[i].expected & INNER_SUPER_PROPERTY) ||
+ (source_data[i].expected == NONE)) && j != 2) {
+ continue;
+ }
int kProgramByteSize = i::StrLength(surroundings[j].prefix) +
i::StrLength(surroundings[j].suffix) +
i::StrLength(source_data[i].body);
@@ -1018,15 +1046,15 @@ TEST(ScopeUsesArgumentsSuperThis) {
.ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser::ParseInfo parse_info = {isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(),
- isolate->unicode_cache()};
- i::Parser parser(&info, &parse_info);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache());
parser.set_allow_harmony_arrow_functions(true);
parser.set_allow_harmony_classes(true);
+ parser.set_allow_harmony_object_literals(true);
parser.set_allow_harmony_scoping(true);
+ parser.set_allow_harmony_sloppy(true);
info.MarkAsGlobal();
- parser.Parse();
+ CHECK(parser.Parse(&info));
CHECK(i::Rewriter::Rewrite(&info));
CHECK(i::Scope::Analyze(&info));
CHECK(info.function() != NULL);
@@ -1036,19 +1064,20 @@ TEST(ScopeUsesArgumentsSuperThis) {
CHECK_EQ(1, script_scope->inner_scopes()->length());
i::Scope* scope = script_scope->inner_scopes()->at(0);
+ // Adjust for constructor scope.
+ if (j == 2) {
+ CHECK_EQ(1, scope->inner_scopes()->length());
+ scope = scope->inner_scopes()->at(0);
+ }
CHECK_EQ((source_data[i].expected & ARGUMENTS) != 0,
scope->uses_arguments());
CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0,
scope->uses_super_property());
- CHECK_EQ((source_data[i].expected & SUPER_CONSTRUCTOR_CALL) != 0,
- scope->uses_super_constructor_call());
CHECK_EQ((source_data[i].expected & THIS) != 0, scope->uses_this());
CHECK_EQ((source_data[i].expected & INNER_ARGUMENTS) != 0,
scope->inner_uses_arguments());
CHECK_EQ((source_data[i].expected & INNER_SUPER_PROPERTY) != 0,
scope->inner_uses_super_property());
- CHECK_EQ((source_data[i].expected & INNER_SUPER_CONSTRUCTOR_CALL) != 0,
- scope->inner_uses_super_constructor_call());
CHECK_EQ((source_data[i].expected & INNER_THIS) != 0,
scope->inner_uses_this());
}
@@ -1070,7 +1099,7 @@ TEST(ScopePositions) {
const char* inner_source;
const char* outer_suffix;
i::ScopeType scope_type;
- i::StrictMode strict_mode;
+ i::LanguageMode language_mode;
};
const SourceData source_data[] = {
@@ -1269,16 +1298,14 @@ TEST(ScopePositions) {
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser::ParseInfo parse_info = {isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(),
- isolate->unicode_cache()};
- i::Parser parser(&info, &parse_info);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache());
parser.set_allow_lazy(true);
parser.set_allow_harmony_scoping(true);
parser.set_allow_harmony_arrow_functions(true);
info.MarkAsGlobal();
- info.SetStrictMode(source_data[i].strict_mode);
- parser.Parse();
+ info.SetLanguageMode(source_data[i].language_mode);
+ parser.Parse(&info);
CHECK(info.function() != NULL);
// Check scope types and positions.
@@ -1356,9 +1383,12 @@ enum ParserFlag {
kAllowHarmonyArrowFunctions,
kAllowHarmonyClasses,
kAllowHarmonyObjectLiterals,
+ kAllowHarmonyRestParameters,
kAllowHarmonyTemplates,
kAllowHarmonySloppy,
- kAllowHarmonyUnicode
+ kAllowHarmonyUnicode,
+ kAllowHarmonyComputedPropertyNames,
+ kAllowStrongMode
};
@@ -1383,8 +1413,13 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
flags.Contains(kAllowHarmonyArrowFunctions));
parser->set_allow_harmony_classes(flags.Contains(kAllowHarmonyClasses));
parser->set_allow_harmony_templates(flags.Contains(kAllowHarmonyTemplates));
+ parser->set_allow_harmony_rest_params(
+ flags.Contains(kAllowHarmonyRestParameters));
parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
parser->set_allow_harmony_unicode(flags.Contains(kAllowHarmonyUnicode));
+ parser->set_allow_harmony_computed_property_names(
+ flags.Contains(kAllowHarmonyComputedPropertyNames));
+ parser->set_allow_strong_mode(flags.Contains(kAllowStrongMode));
}
@@ -1403,7 +1438,11 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
{
i::Scanner scanner(isolate->unicode_cache());
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
- i::PreParser preparser(&scanner, &log, stack_limit);
+ i::Zone zone;
+ i::AstValueFactory ast_value_factory(
+ &zone, CcTest::i_isolate()->heap()->HashSeed());
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
+ stack_limit);
SetParserFlags(&preparser, flags);
scanner.Initialize(&stream);
i::PreParser::PreParseResult result = preparser.PreParseProgram(
@@ -1418,13 +1457,11 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
{
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser::ParseInfo parse_info = {isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(),
- isolate->unicode_cache()};
- i::Parser parser(&info, &parse_info);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache());
SetParserFlags(&parser, flags);
info.MarkAsGlobal();
- parser.Parse();
+ parser.Parse(&info);
function = info.function();
if (function) {
parser_materialized_literals = function->materialized_literal_count();
@@ -1667,8 +1704,9 @@ TEST(StrictOctal) {
v8::Script::Compile(v8::String::NewFromUtf8(CcTest::isolate(), script));
CHECK(try_catch.HasCaught());
v8::String::Utf8Value exception(try_catch.Exception());
- CHECK_EQ("SyntaxError: Octal literals are not allowed in strict mode.",
- *exception);
+ CHECK_EQ(0,
+ strcmp("SyntaxError: Octal literals are not allowed in strict mode.",
+ *exception));
}
@@ -1753,10 +1791,11 @@ TEST(ErrorsEvalAndArguments) {
// ok to use "eval" or "arguments" as identifiers. With the strict mode, it
// isn't.
const char* context_data[][2] = {
- { "\"use strict\";", "" },
- { "var eval; function test_func() {\"use strict\"; ", "}"},
- { NULL, NULL }
- };
+ {"\"use strict\";", ""},
+ {"\"use strong\";", ""},
+ {"var eval; function test_func() {\"use strict\"; ", "}"},
+ {"var eval; function test_func() {\"use strong\"; ", "}"},
+ {NULL, NULL}};
const char* statement_data[] = {
"var eval;",
@@ -1786,7 +1825,9 @@ TEST(ErrorsEvalAndArguments) {
NULL
};
- RunParserSyncTest(context_data, statement_data, kError);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
}
@@ -1893,18 +1934,22 @@ TEST(ErrorsFutureStrictReservedWords) {
// it's ok to use future strict reserved words as identifiers. With the strict
// mode, it isn't.
const char* context_data[][2] = {
- { "function test_func() {\"use strict\"; ", "}"},
- { "() => { \"use strict\"; ", "}" },
- { NULL, NULL }
- };
+ {"function test_func() {\"use strict\"; ", "}"},
+ {"() => { \"use strict\"; ", "}"},
+ {"function test_func() {\"use strong\"; ", "}"},
+ {"() => { \"use strong\"; ", "}"},
+ {NULL, NULL}};
const char* statement_data[] {
LIMITED_FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_STATEMENTS)
NULL
};
- RunParserSyncTest(context_data, statement_data, kError);
- RunParserSyncTest(context_data, statement_data, kError);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
}
@@ -2082,15 +2127,21 @@ TEST(NoErrorsYieldSloppyGeneratorsEnabled) {
TEST(ErrorsYieldStrict) {
const char* context_data[][2] = {
- { "\"use strict\";", "" },
- { "\"use strict\"; function not_gen() {", "}" },
- { "function test_func() {\"use strict\"; ", "}"},
- { "\"use strict\"; function * gen() { function not_gen() {", "} }" },
- { "\"use strict\"; (function not_gen() {", "})" },
- { "\"use strict\"; (function * gen() { (function not_gen() {", "}) })" },
- { "() => {\"use strict\"; ", "}" },
- { NULL, NULL }
- };
+ {"\"use strict\";", ""},
+ {"\"use strict\"; function not_gen() {", "}"},
+ {"function test_func() {\"use strict\"; ", "}"},
+ {"\"use strict\"; function * gen() { function not_gen() {", "} }"},
+ {"\"use strict\"; (function not_gen() {", "})"},
+ {"\"use strict\"; (function * gen() { (function not_gen() {", "}) })"},
+ {"() => {\"use strict\"; ", "}"},
+ {"\"use strong\";", ""},
+ {"\"use strong\"; function not_gen() {", "}"},
+ {"function test_func() {\"use strong\"; ", "}"},
+ {"\"use strong\"; function * gen() { function not_gen() {", "} }"},
+ {"\"use strong\"; (function not_gen() {", "})"},
+ {"\"use strong\"; (function * gen() { (function not_gen() {", "}) })"},
+ {"() => {\"use strong\"; ", "}"},
+ {NULL, NULL}};
const char* statement_data[] = {
"var yield;",
@@ -2110,7 +2161,9 @@ TEST(ErrorsYieldStrict) {
NULL
};
- RunParserSyncTest(context_data, statement_data, kError);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
}
@@ -2228,8 +2281,10 @@ TEST(ErrorsNameOfStrictFunction) {
const char* context_data[][2] = {
{ "function ", ""},
{ "\"use strict\"; function", ""},
+ { "\"use strong\"; function", ""},
{ "function * ", ""},
{ "\"use strict\"; function * ", ""},
+ { "\"use strong\"; function * ", ""},
{ NULL, NULL }
};
@@ -2239,12 +2294,14 @@ TEST(ErrorsNameOfStrictFunction) {
"interface() {\"use strict\";}",
"yield() {\"use strict\";}",
// Future reserved words are always illegal
- "function super() { }",
- "function super() {\"use strict\";}",
+ "super() { }",
+ "super() {\"use strict\";}",
NULL
};
- RunParserSyncTest(context_data, statement_data, kError);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
}
@@ -2305,11 +2362,13 @@ TEST(ErrorsIllegalWordsAsLabelsSloppy) {
TEST(ErrorsIllegalWordsAsLabelsStrict) {
// Tests that illegal tokens as labels produce the correct errors.
const char* context_data[][2] = {
- { "\"use strict\";", "" },
- { "function test_func() {\"use strict\"; ", "}"},
- { "() => {\"use strict\"; ", "}" },
- { NULL, NULL }
- };
+ {"\"use strict\";", ""},
+ {"function test_func() {\"use strict\"; ", "}"},
+ {"() => {\"use strict\"; ", "}"},
+ {"\"use strong\";", ""},
+ {"function test_func() {\"use strong\"; ", "}"},
+ {"() => {\"use strong\"; ", "}"},
+ {NULL, NULL}};
#define LABELLED_WHILE(NAME) #NAME ": while (true) { break " #NAME "; }",
const char* statement_data[] = {
@@ -2319,7 +2378,9 @@ TEST(ErrorsIllegalWordsAsLabelsStrict) {
};
#undef LABELLED_WHILE
- RunParserSyncTest(context_data, statement_data, kError);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
}
@@ -2396,10 +2457,13 @@ TEST(NoErrorsParenthesizedDirectivePrologue) {
const char* statement_data[] = {
"(\"use strict\"); var eval;",
+ "(\"use strong\"); var eval;",
NULL
};
- RunParserSyncTest(context_data, statement_data, kSuccess);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
}
@@ -2463,7 +2527,8 @@ TEST(DontRegressPreParserDataSizes) {
// No functions.
{"var x = 42;", 0},
// Functions.
- {"function foo() {}", 1}, {"function foo() {} function bar() {}", 2},
+ {"function foo() {}", 1},
+ {"function foo() {} function bar() {}", 2},
// Getter / setter functions are recorded as functions if they're on the top
// level.
{"var x = {get foo(){} };", 1},
@@ -2482,7 +2547,7 @@ TEST(DontRegressPreParserDataSizes) {
i::CompilationInfoWithZone info(script);
i::ScriptData* sd = NULL;
info.SetCachedData(&sd, v8::ScriptCompiler::kProduceParserCache);
- i::Parser::Parse(&info, true);
+ i::Parser::ParseStatic(&info, true);
i::ParseData* pd = i::ParseData::FromCachedData(sd);
if (pd->FunctionCount() != test_cases[i].functions) {
@@ -2523,6 +2588,7 @@ TEST(FunctionDeclaresItselfStrict) {
const char* strict_statement_data[] = {
"\"use strict\";",
+ "\"use strong\";",
NULL
};
@@ -2531,8 +2597,11 @@ TEST(FunctionDeclaresItselfStrict) {
NULL
};
- RunParserSyncTest(context_data, strict_statement_data, kError);
- RunParserSyncTest(context_data, non_strict_statement_data, kSuccess);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(context_data, strict_statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, non_strict_statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
}
@@ -2684,12 +2753,9 @@ TEST(ErrorsNewExpression) {
TEST(StrictObjectLiteralChecking) {
- const char* strict_context_data[][2] = {
+ const char* context_data[][2] = {
{"\"use strict\"; var myobject = {", "};"},
{"\"use strict\"; var myobject = {", ",};"},
- { NULL, NULL }
- };
- const char* non_strict_context_data[][2] = {
{"var myobject = {", "};"},
{"var myobject = {", ",};"},
{ NULL, NULL }
@@ -2707,8 +2773,7 @@ TEST(StrictObjectLiteralChecking) {
NULL
};
- RunParserSyncTest(non_strict_context_data, statement_data, kSuccess);
- RunParserSyncTest(strict_context_data, statement_data, kError);
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2720,36 +2785,17 @@ TEST(ErrorsObjectLiteralChecking) {
};
const char* statement_data[] = {
- ",",
- "foo: 1, get foo() {}",
- "foo: 1, set foo(v) {}",
- "\"foo\": 1, get \"foo\"() {}",
- "\"foo\": 1, set \"foo\"(v) {}",
- "1: 1, get 1() {}",
- "1: 1, set 1() {}",
- "get foo() {}, get foo() {}",
- "set foo(_) {}, set foo(_) {}",
- // It's counter-intuitive, but these collide too (even in classic
- // mode). Note that we can have "foo" and foo as properties in classic
- // mode,
- // but we cannot have "foo" and get foo, or foo and get "foo".
- "foo: 1, get \"foo\"() {}",
- "foo: 1, set \"foo\"(v) {}",
- "\"foo\": 1, get foo() {}",
- "\"foo\": 1, set foo(v) {}",
- "1: 1, get \"1\"() {}",
- "1: 1, set \"1\"() {}",
- "\"1\": 1, get 1() {}"
- "\"1\": 1, set 1(v) {}"
- // Wrong number of parameters
- "get bar(x) {}",
- "get bar(x, y) {}",
- "set bar() {}",
- "set bar(x, y) {}",
- // Parsing FunctionLiteral for getter or setter fails
- "get foo( +",
- "get foo() \"error\"",
- NULL};
+ ",",
+ // Wrong number of parameters
+ "get bar(x) {}",
+ "get bar(x, y) {}",
+ "set bar() {}",
+ "set bar(x, y) {}",
+ // Parsing FunctionLiteral for getter or setter fails
+ "get foo( +",
+ "get foo() \"error\"",
+ NULL
+ };
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2765,6 +2811,22 @@ TEST(NoErrorsObjectLiteralChecking) {
};
const char* statement_data[] = {
+ "foo: 1, get foo() {}",
+ "foo: 1, set foo(v) {}",
+ "\"foo\": 1, get \"foo\"() {}",
+ "\"foo\": 1, set \"foo\"(v) {}",
+ "1: 1, get 1() {}",
+ "1: 1, set 1(v) {}",
+ "get foo() {}, get foo() {}",
+ "set foo(_) {}, set foo(v) {}",
+ "foo: 1, get \"foo\"() {}",
+ "foo: 1, set \"foo\"(v) {}",
+ "\"foo\": 1, get foo() {}",
+ "\"foo\": 1, set foo(v) {}",
+ "1: 1, get \"1\"() {}",
+ "1: 1, set \"1\"(v) {}",
+ "\"1\": 1, get 1() {}",
+ "\"1\": 1, set 1(v) {}",
"foo: 1, bar: 2",
"\"foo\": 1, \"bar\": 2",
"1: 1, 2: 2",
@@ -2829,6 +2891,11 @@ TEST(TooManyArguments) {
TEST(StrictDelete) {
// "delete <Identifier>" is not allowed in strict mode.
+ const char* strong_context_data[][2] = {
+ {"\"use strong\"; ", ""},
+ { NULL, NULL }
+ };
+
const char* strict_context_data[][2] = {
{"\"use strict\"; ", ""},
{ NULL, NULL }
@@ -2870,14 +2937,27 @@ TEST(StrictDelete) {
NULL
};
- RunParserSyncTest(strict_context_data, sloppy_statement_data, kError);
- RunParserSyncTest(sloppy_context_data, sloppy_statement_data, kSuccess);
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
+ RunParserSyncTest(strong_context_data, sloppy_statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, sloppy_statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_context_data, sloppy_statement_data, kSuccess, NULL,
+ 0, always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, good_statement_data, kSuccess);
- RunParserSyncTest(sloppy_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(strong_context_data, good_statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, good_statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_context_data, good_statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, bad_statement_data, kError);
- RunParserSyncTest(sloppy_context_data, bad_statement_data, kError);
+ RunParserSyncTest(strong_context_data, bad_statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, bad_statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_context_data, bad_statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
}
@@ -3093,7 +3173,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- i::Zone zone(isolate);
+ i::Zone zone;
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
@@ -3104,9 +3184,10 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::Handle<i::String> str = name->string();
CHECK(str->IsInternalizedString());
i::Scope* script_scope =
- new (&zone) i::Scope(NULL, i::SCRIPT_SCOPE, &avf, &zone);
+ new (&zone) i::Scope(&zone, NULL, i::SCRIPT_SCOPE, &avf);
script_scope->Initialize();
- i::Scope* s = i::Scope::DeserializeScopeChain(context, script_scope, &zone);
+ i::Scope* s =
+ i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
DCHECK(s != script_scope);
DCHECK(name != NULL);
@@ -3142,7 +3223,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- i::Zone zone(isolate);
+ i::Zone zone;
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
@@ -3151,9 +3232,10 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
avf.Internalize(isolate);
i::Scope* script_scope =
- new (&zone) i::Scope(NULL, i::SCRIPT_SCOPE, &avf, &zone);
+ new (&zone) i::Scope(&zone, NULL, i::SCRIPT_SCOPE, &avf);
script_scope->Initialize();
- i::Scope* s = i::Scope::DeserializeScopeChain(context, script_scope, &zone);
+ i::Scope* s =
+ i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
DCHECK(s != script_scope);
const i::AstRawString* name_x = avf.GetOneByteString("x");
@@ -3189,7 +3271,7 @@ TEST(ExportsMaybeAssigned) {
i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- i::Zone zone(isolate);
+ i::Zone zone;
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
@@ -3198,9 +3280,10 @@ TEST(ExportsMaybeAssigned) {
avf.Internalize(isolate);
i::Scope* script_scope =
- new (&zone) i::Scope(NULL, i::SCRIPT_SCOPE, &avf, &zone);
+ new (&zone) i::Scope(&zone, NULL, i::SCRIPT_SCOPE, &avf);
script_scope->Initialize();
- i::Scope* s = i::Scope::DeserializeScopeChain(context, script_scope, &zone);
+ i::Scope* s =
+ i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
DCHECK(s != script_scope);
const i::AstRawString* name_x = avf.GetOneByteString("x");
const i::AstRawString* name_f = avf.GetOneByteString("f");
@@ -3349,12 +3432,11 @@ TEST(InnerAssignment) {
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser::ParseInfo parse_info = {
- isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache()};
- i::Parser parser(&info, &parse_info);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(),
+ isolate->unicode_cache());
parser.set_allow_harmony_scoping(true);
- CHECK(parser.Parse());
+ CHECK(parser.Parse(&info));
CHECK(i::Compiler::Analyze(&info));
CHECK(info.function() != NULL);
@@ -3562,45 +3644,224 @@ TEST(NoErrorsArrowFunctions) {
}
-TEST(NoErrorsSuper) {
+TEST(SuperNoErrors) {
// Tests that parser and preparser accept 'super' keyword in right places.
- const char* context_data[][2] = {{"", ";"},
- {"k = ", ";"},
- {"foo(", ");"},
- {NULL, NULL}};
+ const char* context_data[][2] = {
+ {"class C { m() { ", "; } }"},
+ {"class C { m() { k = ", "; } }"},
+ {"class C { m() { foo(", "); } }"},
+ {"class C { m() { () => ", "; } }"},
+ {NULL, NULL}
+ };
const char* statement_data[] = {
"super.x",
"super[27]",
+ "new super.x",
+ "new super.x()",
+ "new super[27]",
+ "new super[27]()",
+ "z.super", // Ok, property lookup.
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyArrowFunctions,
+ kAllowHarmonyClasses,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy
+ };
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(SuperErrors) {
+ const char* context_data[][2] = {
+ {"class C { m() { ", "; } }"},
+ {"class C { m() { k = ", "; } }"},
+ {"class C { m() { foo(", "); } }"},
+ {"class C { m() { () => ", "; } }"},
+ {NULL, NULL}
+ };
+
+ const char* expression_data[] = {
+ "super",
+ "super = x",
+ "y = super",
+ "f(super)",
"new super",
"new super()",
"new super(12, 45)",
"new new super",
"new new super()",
"new new super()()",
- "z.super", // Ok, property lookup.
- NULL};
+ NULL
+ };
- static const ParserFlag always_flags[] = {kAllowHarmonyClasses};
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyClasses,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy
+ };
+ RunParserSyncTest(context_data, expression_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
}
-TEST(ErrorsSuper) {
- // Tests that parser and preparser generate same errors for 'super'.
- const char* context_data[][2] = {{"", ";"},
- {"k = ", ";"},
- {"foo(", ");"},
+TEST(SuperCall) {
+ const char* context_data[][2] = {{"", ""},
{NULL, NULL}};
+ const char* success_data[] = {
+ "class C extends B { constructor() { super(); } }",
+ "class C extends B { constructor() { () => super(); } }",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyArrowFunctions,
+ kAllowHarmonyClasses,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy
+ };
+ RunParserSyncTest(context_data, success_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ const char* error_data[] = {
+ "class C { constructor() { super(); } }",
+ "class C { method() { super(); } }",
+ "class C { method() { () => super(); } }",
+ "class C { *method() { super(); } }",
+ "class C { get x() { super(); } }",
+ "class C { set x(_) { super(); } }",
+ "({ method() { super(); } })",
+ "({ *method() { super(); } })",
+ "({ get x() { super(); } })",
+ "({ set x(_) { super(); } })",
+ "({ f: function() { super(); } })",
+ "(function() { super(); })",
+ "var f = function() { super(); }",
+ "({ f: function*() { super(); } })",
+ "(function*() { super(); })",
+ "var f = function*() { super(); }",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, error_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(SuperNewNoErrors) {
+ const char* context_data[][2] = {
+ {"class C { constructor() { ", " } }"},
+ {"class C { *method() { ", " } }"},
+ {"class C { get x() { ", " } }"},
+ {"class C { set x(_) { ", " } }"},
+ {"({ method() { ", " } })"},
+ {"({ *method() { ", " } })"},
+ {"({ get x() { ", " } })"},
+ {"({ set x(_) { ", " } })"},
+ {NULL, NULL}
+ };
+
+ const char* expression_data[] = {
+ "new super.x;",
+ "new super.x();",
+ "() => new super.x;",
+ "() => new super.x();",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyArrowFunctions,
+ kAllowHarmonyClasses,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy
+ };
+ RunParserSyncTest(context_data, expression_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(SuperNewErrors) {
+ const char* context_data[][2] = {
+ {"class C { method() { ", " } }"},
+ {"class C { *method() { ", " } }"},
+ {"class C { get x() { ", " } }"},
+ {"class C { set x(_) { ", " } }"},
+ {"({ method() { ", " } })"},
+ {"({ *method() { ", " } })"},
+ {"({ get x() { ", " } })"},
+ {"({ set x(_) { ", " } })"},
+ {"({ f: function() { ", " } })"},
+ {"(function() { ", " })"},
+ {"var f = function() { ", " }"},
+ {"({ f: function*() { ", " } })"},
+ {"(function*() { ", " })"},
+ {"var f = function*() { ", " }"},
+ {NULL, NULL}
+ };
+
const char* statement_data[] = {
+ "new super;",
+ "new super();",
+ "() => new super;",
+ "() => new super();",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyArrowFunctions,
+ kAllowHarmonyClasses,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy
+ };
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(SuperErrorsNonMethods) {
+ // super is only allowed in methods, accessors and constructors.
+ const char* context_data[][2] = {
+ {"", ";"},
+ {"k = ", ";"},
+ {"foo(", ");"},
+ {"if (", ") {}"},
+ {"if (true) {", "}"},
+ {"if (false) {} else {", "}"},
+ {"while (true) {", "}"},
+ {"function f() {", "}"},
+ {"class C extends (", ") {}"},
+ {"class C { m() { function f() {", "} } }"},
+ {"({ m() { function f() {", "} } })"},
+ {NULL, NULL}
+ };
+
+ const char* statement_data[] = {
+ "super",
"super = x",
"y = super",
"f(super)",
- NULL};
+ "super.x",
+ "super[27]",
+ "super.x()",
+ "super[27]()",
+ "super()",
+ "new super.x",
+ "new super.x()",
+ "new super[27]",
+ "new super[27]()",
+ NULL
+ };
- static const ParserFlag always_flags[] = {kAllowHarmonyClasses};
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyClasses,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy
+ };
RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -3714,8 +3975,6 @@ TEST(MethodDefinitionStrictFormalParamereters) {
const char* params_data[] = {
"x, x",
"x, y, x",
- "eval",
- "arguments",
"var",
"const",
NULL
@@ -3727,9 +3986,58 @@ TEST(MethodDefinitionStrictFormalParamereters) {
}
+TEST(MethodDefinitionEvalArguments) {
+ const char* strict_context_data[][2] =
+ {{"'use strict'; ({method(", "){}});"},
+ {"'use strict'; ({*method(", "){}});"},
+ {NULL, NULL}};
+ const char* sloppy_context_data[][2] =
+ {{"({method(", "){}});"},
+ {"({*method(", "){}});"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "eval",
+ "arguments",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+
+ // Fail in strict mode
+ RunParserSyncTest(strict_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+
+ // OK in sloppy mode
+ RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(MethodDefinitionDuplicateEvalArguments) {
+ const char* context_data[][2] =
+ {{"'use strict'; ({method(", "){}});"},
+ {"'use strict'; ({*method(", "){}});"},
+ {"({method(", "){}});"},
+ {"({*method(", "){}});"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "eval, eval",
+ "eval, a, eval",
+ "arguments, arguments",
+ "arguments, a, arguments",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+
+ // In strict mode, the error is using "eval" or "arguments" as parameter names
+ // In sloppy mode, the error is that eval / arguments are duplicated
+ RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
TEST(MethodDefinitionDuplicateProperty) {
- // Duplicate properties are allowed in ES6 but we haven't removed that check
- // yet.
const char* context_data[][2] = {{"'use strict'; ({", "});"},
{NULL, NULL}};
@@ -3760,7 +4068,7 @@ TEST(MethodDefinitionDuplicateProperty) {
};
static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
- RunParserSyncTest(context_data, params_data, kError, NULL, 0,
+ RunParserSyncTest(context_data, params_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -3791,9 +4099,9 @@ TEST(ClassExpressionNoErrors) {
TEST(ClassDeclarationNoErrors) {
- const char* context_data[][2] = {{"", ""},
- {"{", "}"},
- {"if (true) {", "}"},
+ const char* context_data[][2] = {{"'use strict'; ", ""},
+ {"'use strict'; {", "}"},
+ {"'use strict'; if (true) {", "}"},
{NULL, NULL}};
const char* statement_data[] = {
"class name {}",
@@ -3804,7 +4112,7 @@ TEST(ClassDeclarationNoErrors) {
NULL};
static const ParserFlag always_flags[] = {
- kAllowHarmonyClasses, kAllowHarmonySloppy};
+ kAllowHarmonyClasses, kAllowHarmonyScoping};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -4571,6 +4879,119 @@ TEST(TemplateLiteralsIllegalTokens) {
}
+TEST(ParseRestParameters) {
+ const char* context_data[][2] = {{"'use strict';(function(",
+ "){ return args;})(1, [], /regexp/, 'str',"
+ "function(){});"},
+ {"(function(", "){ return args;})(1, [],"
+ "/regexp/, 'str', function(){});"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "...args",
+ "a, ...args",
+ "... args",
+ "a, ... args",
+ "...\targs",
+ "a, ...\targs",
+ "...\r\nargs",
+ "a, ...\r\nargs",
+ "...\rargs",
+ "a, ...\rargs",
+ "...\t\n\t\t\n args",
+ "a, ... \n \n args",
+ NULL};
+ static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(ParseRestParametersErrors) {
+ const char* context_data[][2] = {{"'use strict';(function(",
+ "){ return args;}(1, [], /regexp/, 'str',"
+ "function(){});"},
+ {"(function(", "){ return args;}(1, [],"
+ "/regexp/, 'str', function(){});"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "...args, b",
+ "a, ...args, b",
+ "...args, b",
+ "a, ...args, b",
+ "...args,\tb",
+ "a,...args\t,b",
+ "...args\r\n, b",
+ "a, ... args,\r\nb",
+ "...args\r,b",
+ "a, ... args,\rb",
+ "...args\t\n\t\t\n, b",
+ "a, ... args, \n \n b",
+ "a, a, ...args",
+ "a,\ta, ...args",
+ "a,\ra, ...args",
+ "a,\na, ...args",
+ NULL};
+ static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
+ RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(RestParametersEvalArguments) {
+ const char* strict_context_data[][2] =
+ {{"'use strict';(function(",
+ "){ return;})(1, [], /regexp/, 'str',function(){});"},
+ {NULL, NULL}};
+ const char* sloppy_context_data[][2] =
+ {{"(function(",
+ "){ return;})(1, [],/regexp/, 'str', function(){});"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "...eval",
+ "eval, ...args",
+ "...arguments",
+ "arguments, ...args",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
+
+ // Fail in strict mode
+ RunParserSyncTest(strict_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+
+ // OK in sloppy mode
+ RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(RestParametersDuplicateEvalArguments) {
+ const char* context_data[][2] =
+ {{"'use strict';(function(",
+ "){ return;})(1, [], /regexp/, 'str',function(){});"},
+ {"(function(",
+ "){ return;})(1, [],/regexp/, 'str', function(){});"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "eval, ...eval",
+ "eval, eval, ...args",
+ "arguments, ...arguments",
+ "arguments, arguments, ...args",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
+
+ // In strict mode, the error is using "eval" or "arguments" as parameter names
+ // In sloppy mode, the error is that eval / arguments are duplicated
+ RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
TEST(LexicalScopingSloppyMode) {
const char* context_data[][2] = {
{"", ""},
@@ -4604,3 +5025,548 @@ TEST(LexicalScopingSloppyMode) {
always_true_flags, arraysize(always_true_flags),
always_false_flags, arraysize(always_false_flags));
}
+
+
+TEST(ComputedPropertyName) {
+ const char* context_data[][2] = {{"({[", "]: 1});"},
+ {"({get [", "]() {}});"},
+ {"({set [", "](_) {}});"},
+ {"({[", "]() {}});"},
+ {"({*[", "]() {}});"},
+ {"(class {get [", "]() {}});"},
+ {"(class {set [", "](_) {}});"},
+ {"(class {[", "]() {}});"},
+ {"(class {*[", "]() {}});"},
+ {NULL, NULL}};
+ const char* error_data[] = {
+ "1, 2",
+ "var name",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyClasses,
+ kAllowHarmonyComputedPropertyNames,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy,
+ };
+ RunParserSyncTest(context_data, error_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ const char* name_data[] = {
+ "1",
+ "1 + 2",
+ "'name'",
+ "\"name\"",
+ "[]",
+ "{}",
+ NULL};
+
+ RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(ComputedPropertyNameShorthandError) {
+ const char* context_data[][2] = {{"({", "});"},
+ {NULL, NULL}};
+ const char* error_data[] = {
+ "a: 1, [2]",
+ "[1], a: 1",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyClasses,
+ kAllowHarmonyComputedPropertyNames,
+ kAllowHarmonyObjectLiterals,
+ kAllowHarmonySloppy,
+ };
+ RunParserSyncTest(context_data, error_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(BasicImportExportParsing) {
+ const char* kSources[] = {
+ "export let x = 0;",
+ "export var y = 0;",
+ "export const z = 0;",
+ "export function func() { };",
+ "export class C { };",
+ "export { };",
+ "function f() {}; f(); export { f };",
+ "var a, b, c; export { a, b as baz, c };",
+ "var d, e; export { d as dreary, e, };",
+ "export default function f() {}",
+ "export default class C {}",
+ "export default 42",
+ "var x; export default x = 7",
+ "export { Q } from 'somemodule.js';",
+ "export * from 'somemodule.js';",
+ "var foo; export { foo as for };",
+ "export { arguments } from 'm.js';",
+ "export { for } from 'm.js';",
+ "export { yield } from 'm.js'",
+ "export { static } from 'm.js'",
+ "export { let } from 'm.js'",
+
+ "import 'somemodule.js';",
+ "import { } from 'm.js';",
+ "import { a } from 'm.js';",
+ "import { a, b as d, c, } from 'm.js';",
+ "import * as thing from 'm.js';",
+ "import thing from 'm.js';",
+ "import thing, * as rest from 'm.js';",
+ "import thing, { a, b, c } from 'm.js';",
+ "import { arguments as a } from 'm.js';",
+ "import { for as f } from 'm.js';",
+ "import { yield as y } from 'm.js';",
+ "import { static as s } from 'm.js';",
+ "import { let as l } from 'm.js';",
+ };
+
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ for (unsigned i = 0; i < arraysize(kSources); ++i) {
+ int kProgramByteSize = i::StrLength(kSources[i]);
+ i::ScopedVector<char> program(kProgramByteSize + 1);
+ i::SNPrintF(program, "%s", kSources[i]);
+ i::Handle<i::String> source =
+ factory->NewStringFromUtf8(i::CStrVector(program.start()))
+ .ToHandleChecked();
+
+ // Show that parsing as a module works
+ {
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::CompilationInfoWithZone info(script);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache());
+ parser.set_allow_harmony_classes(true);
+ parser.set_allow_harmony_modules(true);
+ parser.set_allow_harmony_scoping(true);
+ info.MarkAsModule();
+ if (!parser.Parse(&info)) {
+ i::Handle<i::JSObject> exception_handle(
+ i::JSObject::cast(isolate->pending_exception()));
+ i::Handle<i::String> message_string =
+ i::Handle<i::String>::cast(i::Object::GetProperty(
+ isolate, exception_handle, "message").ToHandleChecked());
+
+ v8::base::OS::Print(
+ "Parser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, we expected no error.",
+ source->ToCString().get(), message_string->ToCString().get());
+ CHECK(false);
+ }
+ }
+
+ // And that parsing a script does not.
+ {
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::CompilationInfoWithZone info(script);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache());
+ parser.set_allow_harmony_classes(true);
+ parser.set_allow_harmony_modules(true);
+ parser.set_allow_harmony_scoping(true);
+ info.MarkAsGlobal();
+ CHECK(!parser.Parse(&info));
+ }
+ }
+}
+
+
+TEST(ImportExportParsingErrors) {
+ const char* kErrorSources[] = {
+ "export {",
+ "var a; export { a",
+ "var a; export { a,",
+ "var a; export { a, ;",
+ "var a; export { a as };",
+ "var a, b; export { a as , b};",
+ "export }",
+ "var foo, bar; export { foo bar };",
+ "export { foo };",
+ "export { , };",
+ "export default;",
+ "export default var x = 7;",
+ "export default let x = 7;",
+ "export default const x = 7;",
+ "export *;",
+ "export * from;",
+ "export { Q } from;",
+ "export default from 'module.js';",
+ "export { for }",
+ "export { for as foo }",
+ "export { arguments }",
+ "export { arguments as foo }",
+ "var a; export { a, a };",
+
+ "import from;",
+ "import from 'm.js';",
+ "import { };",
+ "import {;",
+ "import };",
+ "import { , };",
+ "import { , } from 'm.js';",
+ "import { a } from;",
+ "import { a } 'm.js';",
+ "import , from 'm.js';",
+ "import a , from 'm.js';",
+ "import a { b, c } from 'm.js';",
+ "import arguments from 'm.js';",
+ "import eval from 'm.js';",
+ "import { arguments } from 'm.js';",
+ "import { eval } from 'm.js';",
+ "import { a as arguments } from 'm.js';",
+ "import { for } from 'm.js';",
+ "import { y as yield } from 'm.js'",
+ "import { s as static } from 'm.js'",
+ "import { l as let } from 'm.js'",
+ "import { x }, def from 'm.js';",
+ "import def, def2 from 'm.js';",
+ "import * as x, def from 'm.js';",
+ "import * as x, * as y from 'm.js';",
+ "import {x}, {y} from 'm.js';",
+ "import * as x, {y} from 'm.js';",
+
+ // TODO(ES6): These two forms should be supported
+ "export default function() {};",
+ "export default class {};"
+ };
+
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ for (unsigned i = 0; i < arraysize(kErrorSources); ++i) {
+ int kProgramByteSize = i::StrLength(kErrorSources[i]);
+ i::ScopedVector<char> program(kProgramByteSize + 1);
+ i::SNPrintF(program, "%s", kErrorSources[i]);
+ i::Handle<i::String> source =
+ factory->NewStringFromUtf8(i::CStrVector(program.start()))
+ .ToHandleChecked();
+
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::CompilationInfoWithZone info(script);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache());
+ parser.set_allow_harmony_classes(true);
+ parser.set_allow_harmony_modules(true);
+ parser.set_allow_harmony_scoping(true);
+ info.MarkAsModule();
+ CHECK(!parser.Parse(&info));
+ }
+}
+
+
+TEST(DuplicateProtoError) {
+ const char* context_data[][2] = {
+ {"({", "});"},
+ {"'use strict'; ({", "});"},
+ {NULL, NULL}
+ };
+ const char* error_data[] = {
+ "__proto__: {}, __proto__: {}",
+ "__proto__: {}, \"__proto__\": {}",
+ "__proto__: {}, \"__\x70roto__\": {}",
+ "__proto__: {}, a: 1, __proto__: {}",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, error_data, kError);
+}
+
+
+TEST(DuplicateProtoNoError) {
+ const char* context_data[][2] = {
+ {"({", "});"},
+ {"'use strict'; ({", "});"},
+ {NULL, NULL}
+ };
+ const char* error_data[] = {
+ "__proto__: {}, ['__proto__']: {}",
+ "__proto__: {}, __proto__() {}",
+ "__proto__: {}, get __proto__() {}",
+ "__proto__: {}, set __proto__(v) {}",
+ "__proto__: {}, __proto__",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyComputedPropertyNames,
+ kAllowHarmonyObjectLiterals,
+ };
+ RunParserSyncTest(context_data, error_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(DeclarationsError) {
+ const char* context_data[][2] = {{"'use strict'; if (true)", ""},
+ {"'use strict'; if (false) {} else", ""},
+ {"'use strict'; while (false)", ""},
+ {"'use strict'; for (;;)", ""},
+ {"'use strict'; for (x in y)", ""},
+ {"'use strict'; do ", " while (false)"},
+ {"'use strong'; if (true)", ""},
+ {"'use strong'; if (false) {} else", ""},
+ {"'use strong'; while (false)", ""},
+ {"'use strong'; for (;;)", ""},
+ {"'use strong'; for (x in y)", ""},
+ {"'use strong'; do ", " while (false)"},
+ {NULL, NULL}};
+
+ const char* statement_data[] = {
+ "let x = 1;",
+ "const x = 1;",
+ "class C {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyClasses, kAllowHarmonyScoping, kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+void TestLanguageMode(const char* source,
+ i::LanguageMode expected_language_mode) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ i::Handle<i::Script> script =
+ factory->NewScript(factory->NewStringFromAsciiChecked(source));
+ i::CompilationInfoWithZone info(script);
+ i::Parser parser(&info, isolate->stack_guard()->real_climit(),
+ isolate->heap()->HashSeed(), isolate->unicode_cache());
+ parser.set_allow_strong_mode(true);
+ info.MarkAsGlobal();
+ parser.Parse(&info);
+ CHECK(info.function() != NULL);
+ CHECK_EQ(expected_language_mode, info.function()->language_mode());
+}
+
+
+TEST(LanguageModeDirectives) {
+ TestLanguageMode("\"use nothing\"", i::SLOPPY);
+ TestLanguageMode("\"use strict\"", i::STRICT);
+ TestLanguageMode("\"use strong\"", i::STRONG);
+
+ TestLanguageMode("var x = 1; \"use strict\"", i::SLOPPY);
+ TestLanguageMode("var x = 1; \"use strong\"", i::SLOPPY);
+
+ // Test that multiple directives ("use strict" / "use strong") put the parser
+ // into the correct mode.
+ TestLanguageMode("\"use strict\"; \"use strong\";", i::STRONG);
+ TestLanguageMode("\"use strong\"; \"use strict\";", i::STRONG);
+
+ TestLanguageMode("\"use some future directive\"; \"use strict\";", i::STRICT);
+ TestLanguageMode("\"use some future directive\"; \"use strong\";", i::STRONG);
+}
+
+
+TEST(PropertyNameEvalArguments) {
+ const char* context_data[][2] = {{"'use strict';", ""},
+ {"'use strong';", ""},
+ {NULL, NULL}};
+
+ const char* statement_data[] = {
+ "({eval: 1})",
+ "({arguments: 1})",
+ "({eval() {}})",
+ "({arguments() {}})",
+ "({*eval() {}})",
+ "({*arguments() {}})",
+ "({get eval() {}})",
+ "({get arguments() {}})",
+ "({set eval(_) {}})",
+ "({set arguments(_) {}})",
+
+ "class C {eval() {}}",
+ "class C {arguments() {}}",
+ "class C {*eval() {}}",
+ "class C {*arguments() {}}",
+ "class C {get eval() {}}",
+ "class C {get arguments() {}}",
+ "class C {set eval(_) {}}",
+ "class C {set arguments(_) {}}",
+
+ "class C {static eval() {}}",
+ "class C {static arguments() {}}",
+ "class C {static *eval() {}}",
+ "class C {static *arguments() {}}",
+ "class C {static get eval() {}}",
+ "class C {static get arguments() {}}",
+ "class C {static set eval(_) {}}",
+ "class C {static set arguments(_) {}}",
+
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyClasses, kAllowHarmonyObjectLiterals, kAllowHarmonyScoping,
+ kAllowStrongMode};
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(FunctionLiteralDuplicateParameters) {
+ const char* strict_context_data[][2] =
+ {{"'use strict';(function(", "){})();"},
+ {"(function(", ") { 'use strict'; })();"},
+ {"'use strict'; function fn(", ") {}; fn();"},
+ {"function fn(", ") { 'use strict'; }; fn();"},
+ {"'use strong';(function(", "){})();"},
+ {"(function(", ") { 'use strong'; })();"},
+ {"'use strong'; function fn(", ") {}; fn();"},
+ {"function fn(", ") { 'use strong'; }; fn();"},
+ {NULL, NULL}};
+
+ const char* sloppy_context_data[][2] =
+ {{"(function(", "){})();"},
+ {"(function(", ") {})();"},
+ {"function fn(", ") {}; fn();"},
+ {"function fn(", ") {}; fn();"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "a, a",
+ "a, a, a",
+ "b, a, a",
+ "a, b, c, c",
+ "a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, w",
+ NULL};
+
+ static const ParserFlag always_flags[] = { kAllowStrongMode };
+ RunParserSyncTest(strict_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, NULL, 0);
+}
+
+
+TEST(VarForbiddenInStrongMode) {
+ const char* strong_context_data[][2] =
+ {{"'use strong'; ", ""},
+ {"function f() {'use strong'; ", "}"},
+ {"function f() {'use strong'; while (true) { ", "} }"},
+ {NULL, NULL}};
+
+ const char* strict_context_data[][2] =
+ {{"'use strict'; ", ""},
+ {"function f() {'use strict'; ", "}"},
+ {"function f() {'use strict'; while (true) { ", "} }"},
+ {NULL, NULL}};
+
+ const char* sloppy_context_data[][2] =
+ {{"", ""},
+ {"function f() { ", "}"},
+ {NULL, NULL}};
+
+ const char* var_declarations[] = {
+ "var x = 0;",
+ "for (var i = 0; i < 10; i++) { }",
+ NULL};
+
+ const char* let_declarations[] = {
+ "let x = 0;",
+ "for (let i = 0; i < 10; i++) { }",
+ NULL};
+
+ const char* const_declarations[] = {
+ "const x = 0;",
+ NULL};
+
+ static const ParserFlag always_flags[] = {kAllowStrongMode,
+ kAllowHarmonyScoping};
+ RunParserSyncTest(strong_context_data, var_declarations, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strong_context_data, let_declarations, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strong_context_data, const_declarations, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ RunParserSyncTest(strict_context_data, var_declarations, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, let_declarations, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ RunParserSyncTest(sloppy_context_data, var_declarations, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+ // At the moment, let declarations are only available in strict mode.
+ RunParserSyncTest(sloppy_context_data, let_declarations, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(StrongEmptySubStatements) {
+ const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
+ const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
+ const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
+
+ const char* data[] = {
+ "if (1);",
+ "if (1) {} else;",
+ "while (1);",
+ "do; while (1);",
+ "for (;;);",
+ "for (x in []);",
+ "for (x of []);",
+ "for (const x = 0;;);",
+ "for (const x in []);",
+ "for (const x of []);",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowStrongMode, kAllowHarmonyScoping
+ };
+ RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strong_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(StrongForIn) {
+ const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
+ const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
+ const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
+
+ const char* data[] = {
+ "for (x in []) {}",
+ "for (const x in []) {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowStrongMode, kAllowHarmonyScoping
+ };
+ RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strong_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 90926d1a96..2576af81a5 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -24,6 +24,10 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
__asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr));
#elif V8_HOST_ARCH_MIPS64
__asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
+#elif defined(__PPC64__) || defined(_ARCH_PPC64)
+ __asm__ __volatile__("std 1, %0" : "=g"(sp_addr));
+#elif defined(__PPC__) || defined(_ARCH_PPC)
+ __asm__ __volatile__("stw 1, %0" : "=g"(sp_addr));
#else
#error Host architecture was not detected as supported by v8
#endif
@@ -52,7 +56,8 @@ TEST(StackAlignment) {
v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
v8::Local<v8::Value> result = foo->Call(global_object, 0, NULL);
- CHECK_EQ(0, result->Uint32Value() % v8::base::OS::ActivationFrameAlignment());
+ CHECK_EQ(0u,
+ result->Uint32Value() % v8::base::OS::ActivationFrameAlignment());
}
#endif // V8_CC_GNU
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 7578b35fbd..0a2c7a5625 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -52,17 +52,17 @@ TEST(ProfileNodeFindOrAddChild) {
ProfileNode* node = tree.root();
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
- CHECK_NE(NULL, childNode1);
+ CHECK(childNode1);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
ProfileNode* childNode2 = node->FindOrAddChild(&entry2);
- CHECK_NE(NULL, childNode2);
+ CHECK(childNode2);
CHECK_NE(childNode1, childNode2);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
CHECK_EQ(childNode2, node->FindOrAddChild(&entry2));
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
ProfileNode* childNode3 = node->FindOrAddChild(&entry3);
- CHECK_NE(NULL, childNode3);
+ CHECK(childNode3);
CHECK_NE(childNode1, childNode3);
CHECK_NE(childNode2, childNode3);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
@@ -77,7 +77,7 @@ TEST(ProfileNodeFindOrAddChildForSameFunction) {
ProfileNode* node = tree.root();
CodeEntry entry1(i::Logger::FUNCTION_TAG, aaa);
ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
- CHECK_NE(NULL, childNode1);
+ CHECK(childNode1);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
// The same function again.
CodeEntry entry2(i::Logger::FUNCTION_TAG, aaa);
@@ -117,64 +117,6 @@ class ProfileTreeTestHelper {
} // namespace
-TEST(ProfileTreeAddPathFromStart) {
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
- ProfileTree tree;
- ProfileTreeTestHelper helper(&tree);
- CHECK_EQ(NULL, helper.Walk(&entry1));
- CHECK_EQ(NULL, helper.Walk(&entry2));
- CHECK_EQ(NULL, helper.Walk(&entry3));
-
- CodeEntry* path[] = {NULL, &entry1, NULL, &entry2, NULL, NULL, &entry3, NULL};
- Vector<CodeEntry*> path_vec(path, sizeof(path) / sizeof(path[0]));
- tree.AddPathFromStart(path_vec);
- CHECK_EQ(NULL, helper.Walk(&entry2));
- CHECK_EQ(NULL, helper.Walk(&entry3));
- ProfileNode* node1 = helper.Walk(&entry1);
- CHECK_NE(NULL, node1);
- CHECK_EQ(0, node1->self_ticks());
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
- ProfileNode* node2 = helper.Walk(&entry1, &entry2);
- CHECK_NE(NULL, node2);
- CHECK_NE(node1, node2);
- CHECK_EQ(0, node2->self_ticks());
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry2));
- ProfileNode* node3 = helper.Walk(&entry1, &entry2, &entry3);
- CHECK_NE(NULL, node3);
- CHECK_NE(node1, node3);
- CHECK_NE(node2, node3);
- CHECK_EQ(1, node3->self_ticks());
-
- tree.AddPathFromStart(path_vec);
- CHECK_EQ(node1, helper.Walk(&entry1));
- CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
- CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
- CHECK_EQ(0, node1->self_ticks());
- CHECK_EQ(0, node2->self_ticks());
- CHECK_EQ(2, node3->self_ticks());
-
- CodeEntry* path2[] = {&entry1, &entry2, &entry2};
- Vector<CodeEntry*> path2_vec(path2, sizeof(path2) / sizeof(path2[0]));
- tree.AddPathFromStart(path2_vec);
- CHECK_EQ(NULL, helper.Walk(&entry2));
- CHECK_EQ(NULL, helper.Walk(&entry3));
- CHECK_EQ(node1, helper.Walk(&entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
- CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
- CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
- CHECK_EQ(2, node3->self_ticks());
- ProfileNode* node4 = helper.Walk(&entry1, &entry2, &entry2);
- CHECK_NE(NULL, node4);
- CHECK_NE(node3, node4);
- CHECK_EQ(1, node4->self_ticks());
-}
-
TEST(ProfileTreeAddPathFromEnd) {
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
@@ -182,64 +124,64 @@ TEST(ProfileTreeAddPathFromEnd) {
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
- CHECK_EQ(NULL, helper.Walk(&entry1));
- CHECK_EQ(NULL, helper.Walk(&entry2));
- CHECK_EQ(NULL, helper.Walk(&entry3));
+ CHECK(!helper.Walk(&entry1));
+ CHECK(!helper.Walk(&entry2));
+ CHECK(!helper.Walk(&entry3));
CodeEntry* path[] = {NULL, &entry3, NULL, &entry2, NULL, NULL, &entry1, NULL};
Vector<CodeEntry*> path_vec(path, sizeof(path) / sizeof(path[0]));
tree.AddPathFromEnd(path_vec);
- CHECK_EQ(NULL, helper.Walk(&entry2));
- CHECK_EQ(NULL, helper.Walk(&entry3));
+ CHECK(!helper.Walk(&entry2));
+ CHECK(!helper.Walk(&entry3));
ProfileNode* node1 = helper.Walk(&entry1);
- CHECK_NE(NULL, node1);
- CHECK_EQ(0, node1->self_ticks());
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
+ CHECK(node1);
+ CHECK_EQ(0u, node1->self_ticks());
+ CHECK(!helper.Walk(&entry1, &entry1));
+ CHECK(!helper.Walk(&entry1, &entry3));
ProfileNode* node2 = helper.Walk(&entry1, &entry2);
- CHECK_NE(NULL, node2);
+ CHECK(node2);
CHECK_NE(node1, node2);
- CHECK_EQ(0, node2->self_ticks());
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry2));
+ CHECK_EQ(0u, node2->self_ticks());
+ CHECK(!helper.Walk(&entry1, &entry2, &entry1));
+ CHECK(!helper.Walk(&entry1, &entry2, &entry2));
ProfileNode* node3 = helper.Walk(&entry1, &entry2, &entry3);
- CHECK_NE(NULL, node3);
+ CHECK(node3);
CHECK_NE(node1, node3);
CHECK_NE(node2, node3);
- CHECK_EQ(1, node3->self_ticks());
+ CHECK_EQ(1u, node3->self_ticks());
tree.AddPathFromEnd(path_vec);
CHECK_EQ(node1, helper.Walk(&entry1));
CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
- CHECK_EQ(0, node1->self_ticks());
- CHECK_EQ(0, node2->self_ticks());
- CHECK_EQ(2, node3->self_ticks());
+ CHECK_EQ(0u, node1->self_ticks());
+ CHECK_EQ(0u, node2->self_ticks());
+ CHECK_EQ(2u, node3->self_ticks());
CodeEntry* path2[] = {&entry2, &entry2, &entry1};
Vector<CodeEntry*> path2_vec(path2, sizeof(path2) / sizeof(path2[0]));
tree.AddPathFromEnd(path2_vec);
- CHECK_EQ(NULL, helper.Walk(&entry2));
- CHECK_EQ(NULL, helper.Walk(&entry3));
+ CHECK(!helper.Walk(&entry2));
+ CHECK(!helper.Walk(&entry3));
CHECK_EQ(node1, helper.Walk(&entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
+ CHECK(!helper.Walk(&entry1, &entry1));
+ CHECK(!helper.Walk(&entry1, &entry3));
CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
- CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
+ CHECK(!helper.Walk(&entry1, &entry2, &entry1));
CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
- CHECK_EQ(2, node3->self_ticks());
+ CHECK_EQ(2u, node3->self_ticks());
ProfileNode* node4 = helper.Walk(&entry1, &entry2, &entry2);
- CHECK_NE(NULL, node4);
+ CHECK(node4);
CHECK_NE(node3, node4);
- CHECK_EQ(1, node4->self_ticks());
+ CHECK_EQ(1u, node4->self_ticks());
}
TEST(ProfileTreeCalculateTotalTicks) {
ProfileTree empty_tree;
- CHECK_EQ(0, empty_tree.root()->self_ticks());
+ CHECK_EQ(0u, empty_tree.root()->self_ticks());
empty_tree.root()->IncrementSelfTicks();
- CHECK_EQ(1, empty_tree.root()->self_ticks());
+ CHECK_EQ(1u, empty_tree.root()->self_ticks());
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* e1_path[] = {&entry1};
@@ -247,38 +189,38 @@ TEST(ProfileTreeCalculateTotalTicks) {
e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
ProfileTree single_child_tree;
- single_child_tree.AddPathFromStart(e1_path_vec);
+ single_child_tree.AddPathFromEnd(e1_path_vec);
single_child_tree.root()->IncrementSelfTicks();
- CHECK_EQ(1, single_child_tree.root()->self_ticks());
+ CHECK_EQ(1u, single_child_tree.root()->self_ticks());
ProfileTreeTestHelper single_child_helper(&single_child_tree);
ProfileNode* node1 = single_child_helper.Walk(&entry1);
- CHECK_NE(NULL, node1);
- CHECK_EQ(1, single_child_tree.root()->self_ticks());
- CHECK_EQ(1, node1->self_ticks());
+ CHECK(node1);
+ CHECK_EQ(1u, single_child_tree.root()->self_ticks());
+ CHECK_EQ(1u, node1->self_ticks());
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
- CodeEntry* e1_e2_path[] = {&entry1, &entry2};
- Vector<CodeEntry*> e1_e2_path_vec(
- e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
+ CodeEntry* e2_e1_path[] = {&entry2, &entry1};
+ Vector<CodeEntry*> e2_e1_path_vec(e2_e1_path,
+ sizeof(e2_e1_path) / sizeof(e2_e1_path[0]));
ProfileTree flat_tree;
ProfileTreeTestHelper flat_helper(&flat_tree);
- flat_tree.AddPathFromStart(e1_path_vec);
- flat_tree.AddPathFromStart(e1_path_vec);
- flat_tree.AddPathFromStart(e1_e2_path_vec);
- flat_tree.AddPathFromStart(e1_e2_path_vec);
- flat_tree.AddPathFromStart(e1_e2_path_vec);
+ flat_tree.AddPathFromEnd(e1_path_vec);
+ flat_tree.AddPathFromEnd(e1_path_vec);
+ flat_tree.AddPathFromEnd(e2_e1_path_vec);
+ flat_tree.AddPathFromEnd(e2_e1_path_vec);
+ flat_tree.AddPathFromEnd(e2_e1_path_vec);
// Results in {root,0,0} -> {entry1,0,2} -> {entry2,0,3}
- CHECK_EQ(0, flat_tree.root()->self_ticks());
+ CHECK_EQ(0u, flat_tree.root()->self_ticks());
node1 = flat_helper.Walk(&entry1);
- CHECK_NE(NULL, node1);
- CHECK_EQ(2, node1->self_ticks());
+ CHECK(node1);
+ CHECK_EQ(2u, node1->self_ticks());
ProfileNode* node2 = flat_helper.Walk(&entry1, &entry2);
- CHECK_NE(NULL, node2);
- CHECK_EQ(3, node2->self_ticks());
+ CHECK(node2);
+ CHECK_EQ(3u, node2->self_ticks());
// Must calculate {root,5,0} -> {entry1,5,2} -> {entry2,3,3}
- CHECK_EQ(0, flat_tree.root()->self_ticks());
- CHECK_EQ(2, node1->self_ticks());
+ CHECK_EQ(0u, flat_tree.root()->self_ticks());
+ CHECK_EQ(2u, node1->self_ticks());
CodeEntry* e2_path[] = {&entry2};
Vector<CodeEntry*> e2_path_vec(
@@ -290,40 +232,40 @@ TEST(ProfileTreeCalculateTotalTicks) {
ProfileTree wide_tree;
ProfileTreeTestHelper wide_helper(&wide_tree);
- wide_tree.AddPathFromStart(e1_path_vec);
- wide_tree.AddPathFromStart(e1_path_vec);
- wide_tree.AddPathFromStart(e1_e2_path_vec);
- wide_tree.AddPathFromStart(e2_path_vec);
- wide_tree.AddPathFromStart(e2_path_vec);
- wide_tree.AddPathFromStart(e2_path_vec);
- wide_tree.AddPathFromStart(e3_path_vec);
- wide_tree.AddPathFromStart(e3_path_vec);
- wide_tree.AddPathFromStart(e3_path_vec);
- wide_tree.AddPathFromStart(e3_path_vec);
+ wide_tree.AddPathFromEnd(e1_path_vec);
+ wide_tree.AddPathFromEnd(e1_path_vec);
+ wide_tree.AddPathFromEnd(e2_e1_path_vec);
+ wide_tree.AddPathFromEnd(e2_path_vec);
+ wide_tree.AddPathFromEnd(e2_path_vec);
+ wide_tree.AddPathFromEnd(e2_path_vec);
+ wide_tree.AddPathFromEnd(e3_path_vec);
+ wide_tree.AddPathFromEnd(e3_path_vec);
+ wide_tree.AddPathFromEnd(e3_path_vec);
+ wide_tree.AddPathFromEnd(e3_path_vec);
// Results in -> {entry1,0,2} -> {entry2,0,1}
// {root,0,0} -> {entry2,0,3}
// -> {entry3,0,4}
- CHECK_EQ(0, wide_tree.root()->self_ticks());
+ CHECK_EQ(0u, wide_tree.root()->self_ticks());
node1 = wide_helper.Walk(&entry1);
- CHECK_NE(NULL, node1);
- CHECK_EQ(2, node1->self_ticks());
+ CHECK(node1);
+ CHECK_EQ(2u, node1->self_ticks());
ProfileNode* node1_2 = wide_helper.Walk(&entry1, &entry2);
- CHECK_NE(NULL, node1_2);
- CHECK_EQ(1, node1_2->self_ticks());
+ CHECK(node1_2);
+ CHECK_EQ(1u, node1_2->self_ticks());
node2 = wide_helper.Walk(&entry2);
- CHECK_NE(NULL, node2);
- CHECK_EQ(3, node2->self_ticks());
+ CHECK(node2);
+ CHECK_EQ(3u, node2->self_ticks());
ProfileNode* node3 = wide_helper.Walk(&entry3);
- CHECK_NE(NULL, node3);
- CHECK_EQ(4, node3->self_ticks());
+ CHECK(node3);
+ CHECK_EQ(4u, node3->self_ticks());
// Calculates -> {entry1,3,2} -> {entry2,1,1}
// {root,10,0} -> {entry2,3,3}
// -> {entry3,4,4}
- CHECK_EQ(0, wide_tree.root()->self_ticks());
- CHECK_EQ(2, node1->self_ticks());
- CHECK_EQ(1, node1_2->self_ticks());
- CHECK_EQ(3, node2->self_ticks());
- CHECK_EQ(4, node3->self_ticks());
+ CHECK_EQ(0u, wide_tree.root()->self_ticks());
+ CHECK_EQ(2u, node1->self_ticks());
+ CHECK_EQ(1u, node1_2->self_ticks());
+ CHECK_EQ(3u, node2->self_ticks());
+ CHECK_EQ(4u, node3->self_ticks());
}
@@ -342,23 +284,23 @@ TEST(CodeMapAddCode) {
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
code_map.AddCode(ToAddress(0x1950), &entry4, 0x10);
- CHECK_EQ(NULL, code_map.FindEntry(0));
- CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1500 - 1)));
+ CHECK(!code_map.FindEntry(0));
+ CHECK(!code_map.FindEntry(ToAddress(0x1500 - 1)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500 + 0x100)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500 + 0x200 - 1)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700 + 0x50)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700 + 0x100 - 1)));
- CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1700 + 0x100)));
- CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1900 - 1)));
+ CHECK(!code_map.FindEntry(ToAddress(0x1700 + 0x100)));
+ CHECK(!code_map.FindEntry(ToAddress(0x1900 - 1)));
CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1900)));
CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1900 + 0x28)));
CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950)));
CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950 + 0x7)));
CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950 + 0x10 - 1)));
- CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1950 + 0x10)));
- CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0xFFFFFFFF)));
+ CHECK(!code_map.FindEntry(ToAddress(0x1950 + 0x10)));
+ CHECK(!code_map.FindEntry(ToAddress(0xFFFFFFFF)));
}
@@ -371,11 +313,11 @@ TEST(CodeMapMoveAndDeleteCode) {
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1700)); // Deprecate bbb.
- CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1500)));
+ CHECK(!code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1700)));
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
code_map.AddCode(ToAddress(0x1750), &entry3, 0x100);
- CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1700)));
+ CHECK(!code_map.FindEntry(ToAddress(0x1700)));
CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1750)));
}
@@ -438,26 +380,26 @@ TEST(RecordTickSample) {
generator.RecordTickSample(sample3);
CpuProfile* profile = profiles.StopProfiling("");
- CHECK_NE(NULL, profile);
+ CHECK(profile);
ProfileTreeTestHelper top_down_test_helper(profile->top_down());
- CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
- CHECK_EQ(NULL, top_down_test_helper.Walk(entry3));
+ CHECK(!top_down_test_helper.Walk(entry2));
+ CHECK(!top_down_test_helper.Walk(entry3));
ProfileNode* node1 = top_down_test_helper.Walk(entry1);
- CHECK_NE(NULL, node1);
+ CHECK(node1);
CHECK_EQ(entry1, node1->entry());
ProfileNode* node2 = top_down_test_helper.Walk(entry1, entry1);
- CHECK_NE(NULL, node2);
+ CHECK(node2);
CHECK_EQ(entry1, node2->entry());
ProfileNode* node3 = top_down_test_helper.Walk(entry1, entry2, entry3);
- CHECK_NE(NULL, node3);
+ CHECK(node3);
CHECK_EQ(entry3, node3->entry());
ProfileNode* node4 = top_down_test_helper.Walk(entry1, entry3, entry1);
- CHECK_NE(NULL, node4);
+ CHECK(node4);
CHECK_EQ(entry1, node4->entry());
}
-static void CheckNodeIds(ProfileNode* node, int* expectedId) {
+static void CheckNodeIds(ProfileNode* node, unsigned* expectedId) {
CHECK_EQ((*expectedId)++, node->id());
for (int i = 0; i < node->children()->length(); i++) {
CheckNodeIds(node->children()->at(i), expectedId);
@@ -501,12 +443,12 @@ TEST(SampleIds) {
generator.RecordTickSample(sample3);
CpuProfile* profile = profiles.StopProfiling("");
- int nodeId = 1;
+ unsigned nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
- CHECK_EQ(7, nodeId - 1);
+ CHECK_EQ(7u, nodeId - 1);
CHECK_EQ(3, profile->samples_count());
- int expected_id[] = {3, 5, 7};
+ unsigned expected_id[] = {3, 5, 7};
for (int i = 0; i < 3; i++) {
CHECK_EQ(expected_id[i], profile->sample(i)->id());
}
@@ -530,9 +472,9 @@ TEST(NoSamples) {
generator.RecordTickSample(sample1);
CpuProfile* profile = profiles.StopProfiling("");
- int nodeId = 1;
+ unsigned nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
- CHECK_EQ(3, nodeId - 1);
+ CHECK_EQ(3u, nodeId - 1);
CHECK_EQ(0, profile->samples_count());
}
@@ -580,13 +522,13 @@ TEST(RecordStackTraceAtStartProfiling) {
// startProfiling
// if the sampler managed to get a tick.
current = PickChild(current, "");
- CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ CHECK(const_cast<ProfileNode*>(current));
current = PickChild(current, "a");
- CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ CHECK(const_cast<ProfileNode*>(current));
current = PickChild(current, "b");
- CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ CHECK(const_cast<ProfileNode*>(current));
current = PickChild(current, "c");
- CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ CHECK(const_cast<ProfileNode*>(current));
CHECK(current->children()->length() == 0 ||
current->children()->length() == 1);
if (current->children()->length() == 1) {
@@ -658,14 +600,14 @@ TEST(ProfileNodeScriptId) {
// startProfiling
// if the sampler managed to get a tick.
current = PickChild(current, "");
- CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+ CHECK(const_cast<v8::CpuProfileNode*>(current));
current = PickChild(current, "b");
- CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+ CHECK(const_cast<v8::CpuProfileNode*>(current));
CHECK_EQ(script_b->GetUnboundScript()->GetId(), current->GetScriptId());
current = PickChild(current, "a");
- CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+ CHECK(const_cast<v8::CpuProfileNode*>(current));
CHECK_EQ(script_a->GetUnboundScript()->GetId(), current->GetScriptId());
}
@@ -764,13 +706,13 @@ TEST(BailoutReason) {
// kTryFinally
// kTryCatch
current = PickChild(current, "");
- CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+ CHECK(const_cast<v8::CpuProfileNode*>(current));
current = PickChild(current, "TryFinally");
- CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+ CHECK(const_cast<v8::CpuProfileNode*>(current));
CHECK(!strcmp("TryFinallyStatement", current->GetBailoutReason()));
current = PickChild(current, "TryCatch");
- CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
+ CHECK(const_cast<v8::CpuProfileNode*>(current));
CHECK(!strcmp("TryCatchStatement", current->GetBailoutReason()));
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 4a572c8160..e7fcbd10e0 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -38,7 +38,6 @@
#include "src/regexp-macro-assembler.h"
#include "src/regexp-macro-assembler-irregexp.h"
#include "src/string-stream.h"
-#include "src/zone-inl.h"
#ifdef V8_INTERPRETED_REGEXP
#include "src/interpreter-irregexp.h"
#else // V8_INTERPRETED_REGEXP
@@ -53,6 +52,11 @@
#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/regexp-macro-assembler-arm64.h"
#endif
+#if V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc.h"
+#include "src/ppc/macro-assembler-ppc.h"
+#include "src/ppc/regexp-macro-assembler-ppc.h"
+#endif
#if V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips.h"
#include "src/mips/macro-assembler-mips.h"
@@ -86,36 +90,36 @@ using namespace v8::internal;
static bool CheckParse(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate());
+ Zone zone;
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
return v8::internal::RegExpParser::ParseRegExp(
- &reader, false, &result, &zone);
+ CcTest::i_isolate(), &zone, &reader, false, false, &result);
}
static void CheckParseEq(const char* input, const char* expected) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate());
+ Zone zone;
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
- &reader, false, &result, &zone));
+ CcTest::i_isolate(), &zone, &reader, false, false, &result));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
std::ostringstream os;
result.tree->Print(os, &zone);
- CHECK_EQ(expected, os.str().c_str());
+ CHECK_EQ(0, strcmp(expected, os.str().c_str()));
}
static bool CheckSimple(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate());
+ Zone zone;
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
- &reader, false, &result, &zone));
+ CcTest::i_isolate(), &zone, &reader, false, false, &result));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
return result.simple;
@@ -129,11 +133,11 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate());
+ Zone zone;
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
- &reader, false, &result, &zone));
+ CcTest::i_isolate(), &zone, &reader, false, false, &result));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
int min_match = result.tree->min_match();
@@ -402,15 +406,15 @@ TEST(ParserRegression) {
static void ExpectError(const char* input,
const char* expected) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate());
+ Zone zone;
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(!v8::internal::RegExpParser::ParseRegExp(
- &reader, false, &result, &zone));
+ CcTest::i_isolate(), &zone, &reader, false, false, &result));
CHECK(result.tree == NULL);
CHECK(!result.error.is_null());
SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
- CHECK_EQ(expected, str.get());
+ CHECK_EQ(0, strcmp(expected, str.get()));
}
@@ -471,7 +475,7 @@ static bool NotWord(uc16 c) {
static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
- Zone zone(CcTest::i_isolate());
+ Zone zone;
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(2, &zone);
CharacterRange::AddClassEscape(c, ranges, &zone);
@@ -497,29 +501,31 @@ TEST(CharacterClassEscapes) {
}
-static RegExpNode* Compile(const char* input, bool multiline, bool is_one_byte,
- Zone* zone) {
+static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
+ bool is_one_byte, Zone* zone) {
Isolate* isolate = CcTest::i_isolate();
FlatStringReader reader(isolate, CStrVector(input));
RegExpCompileData compile_data;
- if (!v8::internal::RegExpParser::ParseRegExp(&reader, multiline,
- &compile_data, zone))
+ if (!v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), zone,
+ &reader, multiline, unicode,
+ &compile_data))
return NULL;
- Handle<String> pattern = isolate->factory()->
- NewStringFromUtf8(CStrVector(input)).ToHandleChecked();
+ Handle<String> pattern = isolate->factory()
+ ->NewStringFromUtf8(CStrVector(input))
+ .ToHandleChecked();
Handle<String> sample_subject =
isolate->factory()->NewStringFromUtf8(CStrVector("")).ToHandleChecked();
- RegExpEngine::Compile(&compile_data, false, false, multiline, false, pattern,
- sample_subject, is_one_byte, zone);
+ RegExpEngine::Compile(isolate, zone, &compile_data, false, false, multiline,
+ false, pattern, sample_subject, is_one_byte);
return compile_data.node;
}
-static void Execute(const char* input, bool multiline, bool is_one_byte,
- bool dot_output = false) {
+static void Execute(const char* input, bool multiline, bool unicode,
+ bool is_one_byte, bool dot_output = false) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate());
- RegExpNode* node = Compile(input, multiline, is_one_byte, &zone);
+ Zone zone;
+ RegExpNode* node = Compile(input, multiline, unicode, is_one_byte, &zone);
USE(node);
#ifdef DEBUG
if (dot_output) {
@@ -556,7 +562,7 @@ static unsigned PseudoRandom(int i, int j) {
TEST(SplayTreeSimple) {
static const unsigned kLimit = 1000;
- Zone zone(CcTest::i_isolate());
+ Zone zone;
ZoneSplayTree<TestConfig> tree(&zone);
bool seen[kLimit];
for (unsigned i = 0; i < kLimit; i++) seen[i] = false;
@@ -566,7 +572,7 @@ TEST(SplayTreeSimple) {
} while (false)
for (int i = 0; i < 50; i++) {
for (int j = 0; j < 50; j++) {
- unsigned next = PseudoRandom(i, j) % kLimit;
+ int next = PseudoRandom(i, j) % kLimit;
if (seen[next]) {
// We've already seen this one. Check the value and remove
// it.
@@ -623,7 +629,7 @@ TEST(DispatchTableConstruction) {
}
}
// Enter test data into dispatch table.
- Zone zone(CcTest::i_isolate());
+ Zone zone;
DispatchTable table(&zone);
for (int i = 0; i < kRangeCount; i++) {
uc16* range = ranges[i];
@@ -686,6 +692,8 @@ typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM64
typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_PPC
+typedef RegExpMacroAssemblerPPC ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS64
@@ -733,9 +741,10 @@ TEST(MacroAssemblerNativeSuccess) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 4);
m.Succeed();
@@ -770,9 +779,10 @@ TEST(MacroAssemblerNativeSimple) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 4);
Label fail, backtrack;
m.PushBacktrack(&fail);
@@ -836,9 +846,10 @@ TEST(MacroAssemblerNativeSimpleUC16) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::UC16,
+ 4);
Label fail, backtrack;
m.PushBacktrack(&fail);
@@ -908,9 +919,10 @@ TEST(MacroAssemblerNativeBacktrack) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 0);
Label fail;
Label backtrack;
@@ -948,9 +960,10 @@ TEST(MacroAssemblerNativeBackReferenceLATIN1) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 4);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -997,9 +1010,10 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::UC16,
+ 4);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -1049,9 +1063,10 @@ TEST(MacroAssemblernativeAtStart) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 0);
Label not_at_start, newline, fail;
m.CheckNotAtStart(&not_at_start);
@@ -1108,9 +1123,10 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 4);
Label fail, succ;
@@ -1166,9 +1182,10 @@ TEST(MacroAssemblerNativeRegisters) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 6, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 6);
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -1267,9 +1284,10 @@ TEST(MacroAssemblerStackOverflow) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 0);
Label loop;
m.Bind(&loop);
@@ -1305,9 +1323,10 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate);
+ Zone zone;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 2, &zone);
+ ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
+ 2);
// At least 2048, to ensure the allocated space for registers
// span one full page.
@@ -1352,8 +1371,9 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
TEST(MacroAssembler) {
byte codes[1024];
- Zone zone(CcTest::i_isolate());
- RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024), &zone);
+ Zone zone;
+ RegExpMacroAssemblerIrregexp m(CcTest::i_isolate(), Vector<byte>(codes, 1024),
+ &zone);
// ^f(o)o.
Label start, fail, backtrack;
@@ -1419,7 +1439,7 @@ TEST(AddInverseToTable) {
static const int kLimit = 1000;
static const int kRangeCount = 16;
for (int t = 0; t < 10; t++) {
- Zone zone(CcTest::i_isolate());
+ Zone zone;
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(kRangeCount, &zone);
for (int i = 0; i < kRangeCount; i++) {
@@ -1440,7 +1460,7 @@ TEST(AddInverseToTable) {
CHECK_EQ(is_on, set->Get(0) == false);
}
}
- Zone zone(CcTest::i_isolate());
+ Zone zone;
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(1, &zone);
ranges->Add(CharacterRange(0xFFF0, 0xFFFE), &zone);
@@ -1467,8 +1487,8 @@ static uc32 canonicalize(uc32 c) {
TEST(LatinCanonicalize) {
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> un_canonicalize;
- for (char lower = 'a'; lower <= 'z'; lower++) {
- char upper = lower + ('A' - 'a');
+ for (unibrow::uchar lower = 'a'; lower <= 'z'; lower++) {
+ unibrow::uchar upper = lower + ('A' - 'a');
CHECK_EQ(canonicalize(lower), canonicalize(upper));
unibrow::uchar uncanon[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = un_canonicalize.get(lower, '\0', uncanon);
@@ -1551,13 +1571,13 @@ TEST(UncanonicalizeEquivalence) {
}
-static void TestRangeCaseIndependence(CharacterRange input,
+static void TestRangeCaseIndependence(Isolate* isolate, CharacterRange input,
Vector<CharacterRange> expected) {
- Zone zone(CcTest::i_isolate());
+ Zone zone;
int count = expected.length();
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(count, &zone);
- input.AddCaseEquivalents(list, false, &zone);
+ input.AddCaseEquivalents(isolate, &zone, list, false);
CHECK_EQ(count, list->length());
for (int i = 0; i < list->length(); i++) {
CHECK_EQ(expected[i].from(), list->at(i).from());
@@ -1566,39 +1586,41 @@ static void TestRangeCaseIndependence(CharacterRange input,
}
-static void TestSimpleRangeCaseIndependence(CharacterRange input,
+static void TestSimpleRangeCaseIndependence(Isolate* isolate,
+ CharacterRange input,
CharacterRange expected) {
EmbeddedVector<CharacterRange, 1> vector;
vector[0] = expected;
- TestRangeCaseIndependence(input, vector);
+ TestRangeCaseIndependence(isolate, input, vector);
}
TEST(CharacterRangeCaseIndependence) {
- TestSimpleRangeCaseIndependence(CharacterRange::Singleton('a'),
+ Isolate* isolate = CcTest::i_isolate();
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Singleton('a'),
CharacterRange::Singleton('A'));
- TestSimpleRangeCaseIndependence(CharacterRange::Singleton('z'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Singleton('z'),
CharacterRange::Singleton('Z'));
- TestSimpleRangeCaseIndependence(CharacterRange('a', 'z'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('a', 'z'),
CharacterRange('A', 'Z'));
- TestSimpleRangeCaseIndependence(CharacterRange('c', 'f'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('c', 'f'),
CharacterRange('C', 'F'));
- TestSimpleRangeCaseIndependence(CharacterRange('a', 'b'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('a', 'b'),
CharacterRange('A', 'B'));
- TestSimpleRangeCaseIndependence(CharacterRange('y', 'z'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('y', 'z'),
CharacterRange('Y', 'Z'));
- TestSimpleRangeCaseIndependence(CharacterRange('a' - 1, 'z' + 1),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('a' - 1, 'z' + 1),
CharacterRange('A', 'Z'));
- TestSimpleRangeCaseIndependence(CharacterRange('A', 'Z'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('A', 'Z'),
CharacterRange('a', 'z'));
- TestSimpleRangeCaseIndependence(CharacterRange('C', 'F'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('C', 'F'),
CharacterRange('c', 'f'));
- TestSimpleRangeCaseIndependence(CharacterRange('A' - 1, 'Z' + 1),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('A' - 1, 'Z' + 1),
CharacterRange('a', 'z'));
// Here we need to add [l-z] to complete the case independence of
// [A-Za-z] but we expect [a-z] to be added since we always add a
// whole block at a time.
- TestSimpleRangeCaseIndependence(CharacterRange('A', 'k'),
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange('A', 'k'),
CharacterRange('a', 'z'));
}
@@ -1616,7 +1638,7 @@ static bool InClass(uc16 c, ZoneList<CharacterRange>* ranges) {
TEST(CharClassDifference) {
- Zone zone(CcTest::i_isolate());
+ Zone zone;
ZoneList<CharacterRange>* base =
new(&zone) ZoneList<CharacterRange>(1, &zone);
base->Add(CharacterRange::Everything(), &zone);
@@ -1643,7 +1665,7 @@ TEST(CharClassDifference) {
TEST(CanonicalizeCharacterSets) {
- Zone zone(CcTest::i_isolate());
+ Zone zone;
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(4, &zone);
CharacterSet set(list);
@@ -1704,7 +1726,7 @@ TEST(CanonicalizeCharacterSets) {
TEST(CharacterRangeMerge) {
- Zone zone(CcTest::i_isolate());
+ Zone zone;
ZoneList<CharacterRange> l1(4, &zone);
ZoneList<CharacterRange> l2(4, &zone);
// Create all combinations of intersections of ranges, both singletons and
diff --git a/deps/v8/test/cctest/test-reloc-info.cc b/deps/v8/test/cctest/test-reloc-info.cc
index 94ed287c44..a238c3a7d8 100644
--- a/deps/v8/test/cctest/test-reloc-info.cc
+++ b/deps/v8/test/cctest/test-reloc-info.cc
@@ -55,10 +55,16 @@ TEST(Positions) {
for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
RelocInfo::Mode mode = (i % 2 == 0) ?
RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
+ if (mode == RelocInfo::STATEMENT_POSITION) {
+ printf("TEST WRITING STATEMENT %p %d\n", pc, pos);
+ } else {
+ printf("TEST WRITING POSITION %p %d\n", pc, pos);
+ }
WriteRinfo(&writer, pc, mode, pos);
CHECK(writer.pos() - RelocInfoWriter::kMaxSize >= relocation_info_end);
}
+ writer.Finish();
relocation_info_size = static_cast<int>(buffer_end - writer.pos());
CodeDesc desc = { buffer.get(), buffer_size, code_size,
relocation_info_size, NULL };
@@ -68,6 +74,7 @@ TEST(Positions) {
RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::POSITION));
pc = buffer.get();
for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
+ printf("TESTING 1: %d\n", i);
RelocInfo::Mode mode = (i % 2 == 0) ?
RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
if (mode == RelocInfo::POSITION) {
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index 2f6f92eb19..698eda8562 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -65,6 +65,12 @@ class SimulatorHelper {
simulator_->get_register(v8::internal::Simulator::sp));
state->fp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::fp));
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ state->pc = reinterpret_cast<void*>(simulator_->get_pc());
+ state->sp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::sp));
+ state->fp = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::fp));
#endif
}
@@ -85,7 +91,7 @@ class SamplingTestHelper {
explicit SamplingTestHelper(const std::string& test_function)
: sample_is_taken_(false), isolate_(CcTest::isolate()) {
- DCHECK_EQ(NULL, instance_);
+ DCHECK(!instance_);
instance_ = this;
v8::HandleScope scope(isolate_);
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate_);
@@ -236,10 +242,10 @@ TEST(StackFramesConsistent) {
const SamplingTestHelper::CodeEventEntry* entry;
entry = helper.FindEventEntry(sample.begin()[0]);
- CHECK_NE(NULL, entry);
+ CHECK(entry);
CHECK(std::string::npos != entry->name.find("test_sampler_api_inner"));
entry = helper.FindEventEntry(sample.begin()[1]);
- CHECK_NE(NULL, entry);
+ CHECK(entry);
CHECK(std::string::npos != entry->name.find("test_sampler_api_outer"));
}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 45da25024f..55eac60f37 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -58,7 +58,7 @@ static uint32_t Encode(const ExternalReferenceEncoder& encoder, T id) {
}
-static int make_code(TypeCode type, int id) {
+static uint32_t make_code(TypeCode type, int id) {
return static_cast<uint32_t>(type) << kReferenceTypeShift | id;
}
@@ -88,7 +88,7 @@ TEST(ExternalReferenceEncoder) {
CHECK_EQ(
make_code(UNCLASSIFIED, 1),
encoder.Encode(ExternalReference::roots_array_start(isolate).address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 34),
+ CHECK_EQ(make_code(UNCLASSIFIED, 33),
encoder.Encode(ExternalReference::cpu_features().address()));
}
@@ -133,7 +133,7 @@ static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
SnapshotByteSink sink;
StartupSerializer ser(isolate, &sink);
ser.Serialize();
- SnapshotData snapshot_data(sink, ser);
+ SnapshotData snapshot_data(ser);
WritePayload(snapshot_data.RawData(), snapshot_file);
return true;
}
@@ -156,6 +156,23 @@ static void Serialize(v8::Isolate* isolate) {
}
+Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
+ Vector<const uint8_t> body,
+ Vector<const uint8_t> tail, int repeats) {
+ int source_length = head.length() + body.length() * repeats + tail.length();
+ uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
+ CopyChars(source, head.start(), head.length());
+ for (int i = 0; i < repeats; i++) {
+ CopyChars(source + head.length() + i * body.length(), body.start(),
+ body.length());
+ }
+ CopyChars(source + head.length() + repeats * body.length(), tail.start(),
+ tail.length());
+ return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
+ source_length);
+}
+
+
// Test that the whole heap can be serialized.
UNINITIALIZED_TEST(Serialize) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
@@ -359,8 +376,8 @@ UNINITIALIZED_TEST(PartialSerialization) {
startup_serializer.SerializeWeakReferences();
- SnapshotData startup_snapshot(startup_sink, startup_serializer);
- SnapshotData partial_snapshot(partial_sink, partial_serializer);
+ SnapshotData startup_snapshot(startup_serializer);
+ SnapshotData partial_snapshot(partial_serializer);
WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
WritePayload(startup_snapshot.RawData(), startup_name.start());
@@ -391,24 +408,31 @@ UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
byte* snapshot = ReadBytes(file_name, &snapshot_size);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- Object* root;
+ HandleScope handle_scope(isolate);
+ Handle<Object> root;
+ Handle<FixedArray> outdated_contexts;
+ // Intentionally empty handle. The deserializer should not come across
+ // any references to the global proxy in this test.
+ Handle<JSGlobalProxy> global_proxy = Handle<JSGlobalProxy>::null();
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- deserializer.DeserializePartial(isolate, &root);
+ root = deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts)
+ .ToHandleChecked();
+ CHECK_EQ(0, outdated_contexts->length());
CHECK(root->IsString());
}
- HandleScope handle_scope(isolate);
- Handle<Object> root_handle(root, isolate);
-
- Object* root2;
+ Handle<Object> root2;
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- deserializer.DeserializePartial(isolate, &root2);
+ root2 = deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts)
+ .ToHandleChecked();
CHECK(root2->IsString());
- CHECK(*root_handle == root2);
+ CHECK(root.is_identical_to(root2));
}
}
v8_isolate->Dispose();
@@ -470,8 +494,8 @@ UNINITIALIZED_TEST(ContextSerialization) {
partial_serializer.Serialize(&raw_context);
startup_serializer.SerializeWeakReferences();
- SnapshotData startup_snapshot(startup_sink, startup_serializer);
- SnapshotData partial_snapshot(partial_sink, partial_serializer);
+ SnapshotData startup_snapshot(startup_serializer);
+ SnapshotData partial_snapshot(partial_serializer);
WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
WritePayload(startup_snapshot.RawData(), startup_name.start());
@@ -501,24 +525,178 @@ UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
byte* snapshot = ReadBytes(file_name, &snapshot_size);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- Object* root;
+ HandleScope handle_scope(isolate);
+ Handle<Object> root;
+ Handle<FixedArray> outdated_contexts;
+ Handle<JSGlobalProxy> global_proxy =
+ isolate->factory()->NewUninitializedJSGlobalProxy();
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- deserializer.DeserializePartial(isolate, &root);
+ root = deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts)
+ .ToHandleChecked();
CHECK(root->IsContext());
+ CHECK(Handle<Context>::cast(root)->global_proxy() == *global_proxy);
+ CHECK_EQ(1, outdated_contexts->length());
}
- HandleScope handle_scope(isolate);
- Handle<Object> root_handle(root, isolate);
-
- Object* root2;
+ Handle<Object> root2;
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- deserializer.DeserializePartial(isolate, &root2);
+ root2 = deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts)
+ .ToHandleChecked();
CHECK(root2->IsContext());
- CHECK(*root_handle != root2);
+ CHECK(!root.is_identical_to(root2));
+ }
+ }
+ v8_isolate->Dispose();
+ }
+}
+
+
+UNINITIALIZED_TEST(CustomContextSerialization) {
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
+ v8::Isolate::CreateParams params;
+ params.enable_serializer = true;
+ v8::Isolate* v8_isolate = v8::Isolate::New(params);
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+ }
+ DCHECK(!env.IsEmpty());
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ // After execution, e's function context refers to the global object.
+ CompileRun(
+ "var e;"
+ "(function() {"
+ " e = function(s) { return eval (s); }"
+ "})();"
+ "var o = this;"
+ "var r = Math.random() + Math.cos(0);"
+ "var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
+ "var s = parseInt('12345');");
+
+ Vector<const uint8_t> source = ConstructSource(
+ STATIC_CHAR_VECTOR("function g() { return [,"),
+ STATIC_CHAR_VECTOR("1,"),
+ STATIC_CHAR_VECTOR("];} a = g(); b = g(); b.push(1);"), 100000);
+ v8::Handle<v8::String> source_str = v8::String::NewFromOneByte(
+ v8_isolate, source.start(), v8::String::kNormalString,
+ source.length());
+ CompileRun(source_str);
+ source.Dispose();
+ }
+ // Make sure all builtin scripts are cached.
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ isolate->bootstrapper()->NativesSourceLookup(i);
+ }
+ }
+ // If we don't do this then we end up with a stray root pointing at the
+ // context even after we have disposed of env.
+ isolate->heap()->CollectAllAvailableGarbage("snapshotting");
+
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
+
+ i::Object* raw_context = *v8::Utils::OpenPersistent(env);
+
+ env.Reset();
+
+ SnapshotByteSink startup_sink;
+ StartupSerializer startup_serializer(isolate, &startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
+ SnapshotByteSink partial_sink;
+ PartialSerializer partial_serializer(isolate, &startup_serializer,
+ &partial_sink);
+ partial_serializer.Serialize(&raw_context);
+ startup_serializer.SerializeWeakReferences();
+
+ SnapshotData startup_snapshot(startup_serializer);
+ SnapshotData partial_snapshot(partial_serializer);
+
+ WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
+ WritePayload(startup_snapshot.RawData(), startup_name.start());
+
+ startup_name.Dispose();
+ }
+ v8_isolate->Dispose();
+ }
+}
+
+
+UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
+ CustomContextSerialization) {
+ FLAG_crankshaft = false;
+ if (!Snapshot::HaveASnapshotToStartFrom()) {
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+ v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ CHECK(v8_isolate);
+ startup_name.Dispose();
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+
+ const char* file_name = FLAG_testing_serialization_file;
+
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ HandleScope handle_scope(isolate);
+ Handle<Object> root;
+ Handle<FixedArray> outdated_contexts;
+ Handle<JSGlobalProxy> global_proxy =
+ isolate->factory()->NewUninitializedJSGlobalProxy();
+ {
+ SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ Deserializer deserializer(&snapshot_data);
+ root = deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts)
+ .ToHandleChecked();
+ CHECK_EQ(2, outdated_contexts->length());
+ CHECK(root->IsContext());
+ Handle<Context> context = Handle<Context>::cast(root);
+ CHECK(context->global_proxy() == *global_proxy);
+ Handle<String> o = isolate->factory()->NewStringFromAsciiChecked("o");
+ Handle<JSObject> global_object(context->global_object(), isolate);
+ Handle<Object> property = JSObject::GetDataProperty(global_object, o);
+ CHECK(property.is_identical_to(global_proxy));
+
+ v8::Handle<v8::Context> v8_context = v8::Utils::ToLocal(context);
+ v8::Context::Scope context_scope(v8_context);
+ double r = CompileRun("r")->ToNumber(v8_isolate)->Value();
+ CHECK(r >= 1 && r <= 2);
+ int f = CompileRun("f()")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(5, f);
+ f = CompileRun("e('f()')")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(5, f);
+ v8::Handle<v8::String> s = CompileRun("s")->ToString(v8_isolate);
+ CHECK(s->Equals(v8_str("12345")));
+ int a = CompileRun("a.length")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(100001, a);
+ int b = CompileRun("b.length")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(100002, b);
}
}
v8_isolate->Dispose();
@@ -554,6 +732,16 @@ int CountBuiltins() {
}
+static Handle<SharedFunctionInfo> CompileScript(
+ Isolate* isolate, Handle<String> source, Handle<String> name,
+ ScriptData** cached_data, v8::ScriptCompiler::CompileOptions options) {
+ return Compiler::CompileScript(source, name, 0, 0, false, false,
+ Handle<Context>(isolate->native_context()),
+ NULL, cached_data, options, NOT_NATIVES_CODE,
+ false);
+}
+
+
TEST(SerializeToplevelOnePlusOne) {
FLAG_serialize_toplevel = true;
LocalContext context;
@@ -575,20 +763,17 @@ TEST(SerializeToplevelOnePlusOne) {
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- orig_source, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, orig_source, Handle<String>(), &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
int builtins_count = CountBuiltins();
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- copy_source, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, copy_source, Handle<String>(), &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -608,6 +793,37 @@ TEST(SerializeToplevelOnePlusOne) {
}
+TEST(CodeCachePromotedToCompilationCache) {
+ FLAG_serialize_toplevel = true;
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+
+ v8::HandleScope scope(CcTest::isolate());
+
+ const char* source = "1 + 1";
+
+ Handle<String> src = isolate->factory()
+ ->NewStringFromUtf8(CStrVector(source))
+ .ToHandleChecked();
+ ScriptData* cache = NULL;
+
+ CompileScript(isolate, src, src, &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
+
+ DisallowCompilation no_compile_expected(isolate);
+ Handle<SharedFunctionInfo> copy = CompileScript(
+ isolate, src, src, &cache, v8::ScriptCompiler::kConsumeCodeCache);
+
+ CHECK(isolate->compilation_cache()
+ ->LookupScript(src, src, 0, 0, false, false,
+ isolate->native_context(), SLOPPY)
+ .ToHandleChecked()
+ .is_identical_to(copy));
+
+ delete cache;
+}
+
+
TEST(SerializeToplevelInternalizedString) {
FLAG_serialize_toplevel = true;
LocalContext context;
@@ -630,10 +846,9 @@ TEST(SerializeToplevelInternalizedString) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- orig_source, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, orig_source, Handle<String>(), &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
Handle<JSFunction> orig_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
orig, isolate->native_context());
@@ -646,10 +861,8 @@ TEST(SerializeToplevelInternalizedString) {
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- copy_source, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, copy_source, Handle<String>(), &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
CHECK(Script::cast(copy->script())->source() == *copy_source);
@@ -671,23 +884,6 @@ TEST(SerializeToplevelInternalizedString) {
}
-Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
- Vector<const uint8_t> body,
- Vector<const uint8_t> tail, int repeats) {
- int source_length = head.length() + body.length() * repeats + tail.length();
- uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
- CopyChars(source, head.start(), head.length());
- for (int i = 0; i < repeats; i++) {
- CopyChars(source + head.length() + i * body.length(), body.start(),
- body.length());
- }
- CopyChars(source + head.length() + repeats * body.length(), tail.start(),
- tail.length());
- return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
- source_length);
-}
-
-
TEST(SerializeToplevelLargeCodeObject) {
FLAG_serialize_toplevel = true;
LocalContext context;
@@ -706,20 +902,17 @@ TEST(SerializeToplevelLargeCodeObject) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
CHECK(isolate->heap()->InSpace(orig->code(), LO_SPACE));
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -762,18 +955,15 @@ TEST(SerializeToplevelLargeStrings) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -835,18 +1025,15 @@ TEST(SerializeToplevelThreeBigStrings) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -943,18 +1130,15 @@ TEST(SerializeToplevelExternalString) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- source_string, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, source_string, Handle<String>(), &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- source_string, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, source_string, Handle<String>(), &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -1005,18 +1189,15 @@ TEST(SerializeToplevelLargeExternalString) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- source_str, Handle<String>(), 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -1059,18 +1240,15 @@ TEST(SerializeToplevelExternalScriptName) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = NULL;
- Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
- source_string, name, 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+ Handle<SharedFunctionInfo> orig =
+ CompileScript(isolate, source_string, name, &cache,
+ v8::ScriptCompiler::kProduceCodeCache);
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = Compiler::CompileScript(
- source_string, name, 0, 0, false,
- Handle<Context>(isolate->native_context()), NULL, &cache,
- v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+ copy = CompileScript(isolate, source_string, name, &cache,
+ v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -1097,12 +1275,8 @@ static void SerializerCodeEventListener(const v8::JitCodeEvent* event) {
}
-TEST(SerializeToplevelIsolates) {
- FLAG_serialize_toplevel = true;
-
- const char* source = "function f() { return 'abc'; }; f() + 'def'";
+v8::ScriptCompiler::CachedData* ProduceCache(const char* source) {
v8::ScriptCompiler::CachedData* cache;
-
v8::Isolate* isolate1 = v8::Isolate::New();
{
v8::Isolate::Scope iscope(isolate1);
@@ -1127,6 +1301,15 @@ TEST(SerializeToplevelIsolates) {
CHECK(result->ToString(isolate1)->Equals(v8_str("abcdef")));
}
isolate1->Dispose();
+ return cache;
+}
+
+
+TEST(SerializeToplevelIsolates) {
+ FLAG_serialize_toplevel = true;
+
+ const char* source = "function f() { return 'abc'; }; f() + 'def'";
+ v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
v8::Isolate* isolate2 = v8::Isolate::New();
isolate2->SetJitCodeEventHandler(v8::kJitCodeEventDefault,
@@ -1160,35 +1343,37 @@ TEST(SerializeToplevelFlagChange) {
FLAG_serialize_toplevel = true;
const char* source = "function f() { return 'abc'; }; f() + 'def'";
- v8::ScriptCompiler::CachedData* cache;
+ v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
- v8::Isolate* isolate1 = v8::Isolate::New();
+ v8::Isolate* isolate2 = v8::Isolate::New();
+ FLAG_allow_natives_syntax = true; // Flag change should trigger cache reject.
{
- v8::Isolate::Scope iscope(isolate1);
- v8::HandleScope scope(isolate1);
- v8::Local<v8::Context> context = v8::Context::New(isolate1);
+ v8::Isolate::Scope iscope(isolate2);
+ v8::HandleScope scope(isolate2);
+ v8::Local<v8::Context> context = v8::Context::New(isolate2);
v8::Context::Scope context_scope(context);
v8::Local<v8::String> source_str = v8_str(source);
v8::ScriptOrigin origin(v8_str("test"));
- v8::ScriptCompiler::Source source(source_str, origin);
- v8::Local<v8::UnboundScript> script = v8::ScriptCompiler::CompileUnbound(
- isolate1, &source, v8::ScriptCompiler::kProduceCodeCache);
- const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
- CHECK(data);
- // Persist cached data.
- uint8_t* buffer = NewArray<uint8_t>(data->length);
- MemCopy(buffer, data->data, data->length);
- cache = new v8::ScriptCompiler::CachedData(
- buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
-
- v8::Local<v8::Value> result = script->BindToCurrentContext()->Run();
- CHECK(result->ToString(isolate1)->Equals(v8_str("abcdef")));
+ v8::ScriptCompiler::Source source(source_str, origin, cache);
+ v8::ScriptCompiler::CompileUnbound(isolate2, &source,
+ v8::ScriptCompiler::kConsumeCodeCache);
+ CHECK(cache->rejected);
}
- isolate1->Dispose();
+ isolate2->Dispose();
+}
+
+
+TEST(SerializeToplevelBitFlip) {
+ FLAG_serialize_toplevel = true;
+
+ const char* source = "function f() { return 'abc'; }; f() + 'def'";
+ v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
+
+ // Random bit flip.
+ const_cast<uint8_t*>(cache->data)[337] ^= 0x40;
v8::Isolate* isolate2 = v8::Isolate::New();
- FLAG_allow_natives_syntax = true; // Flag change should trigger cache reject.
{
v8::Isolate::Scope iscope(isolate2);
v8::HandleScope scope(isolate2);
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index a84b867f8d..331ea02510 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -459,18 +459,6 @@ TEST(SizeOfFirstPageIsLargeEnough) {
}
-static inline void FillCurrentPage(v8::internal::NewSpace* space) {
- int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
- *space->allocation_top_address());
- if (new_linear_size == 0) return;
- v8::internal::AllocationResult allocation =
- space->AllocateRaw(new_linear_size);
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), new_linear_size);
-}
-
-
UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
FLAG_target_semi_space_size = 2;
if (FLAG_optimize_for_size) return;
@@ -502,9 +490,9 @@ UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
// Turn the allocation into a proper object so isolate teardown won't
// crash.
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(new_space->heap(), 80);
+ HeapObject* free_space = NULL;
+ CHECK(allocation.To(&free_space));
+ new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
}
}
isolate->Dispose();
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index d1f23f75a6..9a4e96ffd5 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1038,8 +1038,9 @@ TEST(JSONStringifySliceMadeExternal) {
CHECK(v8::Utils::OpenHandle(*slice)->IsSlicedString());
CHECK(v8::Utils::OpenHandle(*underlying)->IsExternalTwoByteString());
- CHECK_EQ("\"bcdefghijklmnopqrstuvwxyz\"",
- *v8::String::Utf8Value(CompileRun("JSON.stringify(slice)")));
+ CHECK_EQ(0,
+ strcmp("\"bcdefghijklmnopqrstuvwxyz\"",
+ *v8::String::Utf8Value(CompileRun("JSON.stringify(slice)"))));
}
@@ -1171,7 +1172,7 @@ TEST(TrivialSlice) {
CHECK(result->IsString());
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
- CHECK_EQ("bcdefghijklmnopqrstuvwxy", string->ToCString().get());
+ CHECK_EQ(0, strcmp("bcdefghijklmnopqrstuvwxy", string->ToCString().get()));
}
@@ -1193,14 +1194,14 @@ TEST(SliceFromSlice) {
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
- CHECK_EQ("bcdefghijklmnopqrstuvwxy", string->ToCString().get());
+ CHECK_EQ(0, strcmp("bcdefghijklmnopqrstuvwxy", string->ToCString().get()));
result = CompileRun(slice_from_slice);
CHECK(result->IsString());
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
- CHECK_EQ("cdefghijklmnopqrstuvwx", string->ToCString().get());
+ CHECK_EQ(0, strcmp("cdefghijklmnopqrstuvwx", string->ToCString().get()));
}
@@ -1208,7 +1209,7 @@ UNINITIALIZED_TEST(OneByteArrayJoin) {
v8::Isolate::CreateParams create_params;
// Set heap limits.
create_params.constraints.set_max_semi_space_size(1);
- create_params.constraints.set_max_old_space_size(4);
+ create_params.constraints.set_max_old_space_size(5);
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
@@ -1263,7 +1264,7 @@ TEST(RobustSubStringStub) {
// Ordinary HeapNumbers can be handled (in runtime).
result = CompileRun("%_SubString(short, Math.sqrt(4), 5.1);");
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK_EQ("cde", string->ToCString().get());
+ CHECK_EQ(0, strcmp("cde", string->ToCString().get()));
CompileRun("var long = 'abcdefghijklmnopqrstuvwxyz';");
// Invalid indices.
@@ -1278,7 +1279,7 @@ TEST(RobustSubStringStub) {
// Ordinary HeapNumbers within bounds can be handled (in runtime).
result = CompileRun("%_SubString(long, Math.sqrt(4), 17.1);");
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK_EQ("cdefghijklmnopq", string->ToCString().get());
+ CHECK_EQ(0, strcmp("cdefghijklmnopq", string->ToCString().get()));
// Test that out-of-bounds substring of a slice fails when the indices
// would have been valid for the underlying string.
@@ -1445,6 +1446,7 @@ TEST(InvalidExternalString) {
static const int invalid = String::kMaxLength + 1; \
HandleScope scope(isolate); \
Vector<TYPE> dummy = Vector<TYPE>::New(invalid); \
+ memset(dummy.start(), 0x0, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->factory()->FUN(Vector<const TYPE>::cast(dummy)).is_null()); \
memset(dummy.start(), 0x20, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->has_pending_exception()); \
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index 066c997037..bb2d117b25 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -28,7 +28,7 @@ TEST(Create) {
CHECK(symbols[i]->IsName());
CHECK(symbols[i]->IsSymbol());
CHECK(symbols[i]->HasHashCode());
- CHECK_GT(symbols[i]->Hash(), 0);
+ CHECK_GT(symbols[i]->Hash(), 0u);
os << Brief(*symbols[i]) << "\n";
#if OBJECT_PRINT
symbols[i]->Print(os);
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 21d3b95f10..d31b4131df 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -270,31 +270,23 @@ TEST(TerminateLoadICException) {
}
+v8::Persistent<v8::String> reenter_script_1;
+v8::Persistent<v8::String> reenter_script_2;
+
void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
- "function f() {"
- " var term = true;"
- " try {"
- " while(true) {"
- " if (term) terminate();"
- " term = false;"
- " }"
- " fail();"
- " } catch(e) {"
- " fail();"
- " }"
- "}"
- "f()"))->Run();
+ v8::Isolate* isolate = args.GetIsolate();
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ v8::Local<v8::String> script =
+ v8::Local<v8::String>::New(isolate, reenter_script_1);
+ v8::Script::Compile(script)->Run();
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
- "function f() { fail(); } f()"))
- ->Run();
+ CHECK(v8::V8::IsExecutionTerminating(isolate));
+ script = v8::Local<v8::String>::New(isolate, reenter_script_2);
+ v8::Script::Compile(script)->Run();
}
@@ -309,17 +301,28 @@ TEST(TerminateAndReenterFromThreadItself) {
v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- isolate, "try { loop(); fail(); } catch(e) { fail(); }");
- v8::Script::Compile(source)->Run();
+ // Create script strings upfront as it won't work when terminating.
+ reenter_script_1.Reset(isolate, v8_str(
+ "function f() {"
+ " var term = true;"
+ " try {"
+ " while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " fail();"
+ " }"
+ "}"
+ "f()"));
+ reenter_script_2.Reset(isolate, v8_str("function f() { fail(); } f()"));
+ CompileRun("try { loop(); fail(); } catch(e) { fail(); }");
CHECK(!v8::V8::IsExecutionTerminating(isolate));
// Check we can run JS again after termination.
- CHECK(v8::Script::Compile(
- v8::String::NewFromUtf8(isolate,
- "function f() { return true; }"
- "f()"))
- ->Run()
- ->IsTrue());
+ CHECK(CompileRun("function f() { return true; } f()")->IsTrue());
+ reenter_script_1.Reset();
+ reenter_script_2.Reset();
}
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 6bcdb35e7e..59c9f74c96 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -41,10 +41,11 @@ static void CheckPropertyDetailsFieldsConsistency(PropertyType type,
TEST(PropertyDetailsFieldsConsistency) {
- CheckPropertyDetailsFieldsConsistency(FIELD, DATA, IN_OBJECT);
- CheckPropertyDetailsFieldsConsistency(CONSTANT, DATA, IN_DESCRIPTOR);
- CheckPropertyDetailsFieldsConsistency(ACCESSOR_FIELD, ACCESSOR, IN_OBJECT);
- CheckPropertyDetailsFieldsConsistency(CALLBACKS, ACCESSOR, IN_DESCRIPTOR);
+ CheckPropertyDetailsFieldsConsistency(DATA, kData, kField);
+ CheckPropertyDetailsFieldsConsistency(DATA_CONSTANT, kData, kDescriptor);
+ CheckPropertyDetailsFieldsConsistency(ACCESSOR, kAccessor, kField);
+ CheckPropertyDetailsFieldsConsistency(ACCESSOR_CONSTANT, kAccessor,
+ kDescriptor);
}
@@ -77,7 +78,7 @@ TEST(TransitionArray_SimpleFieldTransitions) {
transitions->Insert(map0, name1, map1, SIMPLE_PROPERTY_TRANSITION);
ConnectTransition(map0, transitions, map1);
CHECK(transitions->IsSimpleTransition());
- transition = transitions->Search(DATA, *name1, attributes);
+ transition = transitions->Search(kData, *name1, attributes);
CHECK_EQ(TransitionArray::kSimpleTransitionIndex, transition);
CHECK_EQ(*name1, transitions->GetKey(transition));
CHECK_EQ(*map1, transitions->GetTarget(transition));
@@ -87,11 +88,11 @@ TEST(TransitionArray_SimpleFieldTransitions) {
ConnectTransition(map0, transitions, map2);
CHECK(transitions->IsFullTransitionArray());
- transition = transitions->Search(DATA, *name1, attributes);
+ transition = transitions->Search(kData, *name1, attributes);
CHECK_EQ(*name1, transitions->GetKey(transition));
CHECK_EQ(*map1, transitions->GetTarget(transition));
- transition = transitions->Search(DATA, *name2, attributes);
+ transition = transitions->Search(kData, *name2, attributes);
CHECK_EQ(*name2, transitions->GetKey(transition));
CHECK_EQ(*map2, transitions->GetTarget(transition));
@@ -127,7 +128,7 @@ TEST(TransitionArray_FullFieldTransitions) {
transitions = transitions->Insert(map0, name1, map1, PROPERTY_TRANSITION);
ConnectTransition(map0, transitions, map1);
CHECK(transitions->IsFullTransitionArray());
- transition = transitions->Search(DATA, *name1, attributes);
+ transition = transitions->Search(kData, *name1, attributes);
CHECK_EQ(*name1, transitions->GetKey(transition));
CHECK_EQ(*map1, transitions->GetTarget(transition));
@@ -135,11 +136,11 @@ TEST(TransitionArray_FullFieldTransitions) {
ConnectTransition(map0, transitions, map2);
CHECK(transitions->IsFullTransitionArray());
- transition = transitions->Search(DATA, *name1, attributes);
+ transition = transitions->Search(kData, *name1, attributes);
CHECK_EQ(*name1, transitions->GetKey(transition));
CHECK_EQ(*map1, transitions->GetTarget(transition));
- transition = transitions->Search(DATA, *name2, attributes);
+ transition = transitions->Search(kData, *name2, attributes);
CHECK_EQ(*name2, transitions->GetKey(transition));
CHECK_EQ(*map2, transitions->GetTarget(transition));
@@ -179,7 +180,7 @@ TEST(TransitionArray_DifferentFieldNames) {
}
for (int i = 0; i < PROPS_COUNT; i++) {
- int transition = transitions->Search(DATA, *names[i], attributes);
+ int transition = transitions->Search(kData, *names[i], attributes);
CHECK_EQ(*names[i], transitions->GetKey(transition));
CHECK_EQ(*maps[i], transitions->GetTarget(transition));
}
@@ -222,7 +223,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
for (int i = 0; i < ATTRS_COUNT; i++) {
PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
- int transition = transitions->Search(DATA, *name, attributes);
+ int transition = transitions->Search(kData, *name, attributes);
CHECK_EQ(*name, transitions->GetKey(transition));
CHECK_EQ(*attr_maps[i], transitions->GetTarget(transition));
}
@@ -285,14 +286,14 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
for (int i = 0; i < ATTRS_COUNT; i++) {
PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
- int transition = transitions->Search(DATA, *name, attributes);
+ int transition = transitions->Search(kData, *name, attributes);
CHECK_EQ(*name, transitions->GetKey(transition));
CHECK_EQ(*attr_maps[i], transitions->GetTarget(transition));
}
// Ensure that info about the other fields still valid.
for (int i = 0; i < PROPS_COUNT; i++) {
- int transition = transitions->Search(DATA, *names[i], NONE);
+ int transition = transitions->Search(kData, *names[i], NONE);
CHECK_EQ(*names[i], transitions->GetKey(transition));
CHECK_EQ(*maps[i], transitions->GetTarget(transition));
}
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index ebef527dd0..f9b6bf862f 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -36,7 +36,8 @@ struct ZoneRep {
return !IsBitset(t) && reinterpret_cast<intptr_t>(AsStruct(t)[0]) == tag;
}
static bool IsBitset(Type* t) { return reinterpret_cast<uintptr_t>(t) & 1; }
- static bool IsUnion(Type* t) { return IsStruct(t, 6); }
+ // HACK: kUnionTag is not visible here, so use the IsUnionForTesting()
+ static bool IsUnion(Type* t) { return t->IsUnionForTesting(); }
static Struct* AsStruct(Type* t) {
return reinterpret_cast<Struct*>(t);
@@ -69,7 +70,8 @@ struct HeapRep {
return t->IsFixedArray() && Smi::cast(AsStruct(t)->get(0))->value() == tag;
}
static bool IsBitset(Handle<HeapType> t) { return t->IsSmi(); }
- static bool IsUnion(Handle<HeapType> t) { return IsStruct(t, 6); }
+ // HACK: kUnionTag is not visible here, so use the IsUnionForTesting()
+ static bool IsUnion(Handle<HeapType> t) { return t->IsUnionForTesting(); }
static Struct* AsStruct(Handle<HeapType> t) { return FixedArray::cast(*t); }
static bitset AsBitset(Handle<HeapType> t) {
@@ -103,12 +105,12 @@ struct Tests : Rep {
Zone zone;
TypesInstance T;
- Tests() :
- isolate(CcTest::i_isolate()),
- scope(isolate),
- zone(isolate),
- T(Rep::ToRegion(&zone, isolate), isolate) {
- }
+ Tests()
+ : isolate(CcTest::i_isolate()),
+ scope(isolate),
+ zone(),
+ T(Rep::ToRegion(&zone, isolate), isolate,
+ isolate->random_number_generator()) {}
bool Equal(TypeHandle type1, TypeHandle type2) {
return
@@ -136,6 +138,14 @@ struct Tests : Rep {
}
}
+ void CheckSubOrEqual(TypeHandle type1, TypeHandle type2) {
+ CHECK(type1->Is(type2));
+ if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ CHECK((this->AsBitset(type1) | this->AsBitset(type2))
+ == this->AsBitset(type2));
+ }
+ }
+
void CheckUnordered(TypeHandle type1, TypeHandle type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
@@ -225,17 +235,85 @@ struct Tests : Rep {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
TypeHandle type1 = *it1;
TypeHandle type2 = *it2;
- TypeHandle intersect12 = T.Intersect(type1, type2);
if (this->IsBitset(type1) && this->IsBitset(type2)) {
+ TypeHandle intersect12 = T.Intersect(type1, type2);
bitset bits = this->AsBitset(type1) & this->AsBitset(type2);
- CHECK(
- (Rep::BitsetType::IsInhabited(bits) ? bits : 0) ==
- this->AsBitset(intersect12));
+ CHECK(bits == this->AsBitset(intersect12));
}
}
}
}
+ void PointwiseRepresentation() {
+ // Check we can decompose type into semantics and representation and
+ // then compose it back to get an equivalent type.
+ int counter = 0;
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ counter++;
+ printf("Counter: %i\n", counter);
+ fflush(stdout);
+ TypeHandle type1 = *it1;
+ TypeHandle representation = T.Representation(type1);
+ TypeHandle semantic = T.Semantic(type1);
+ TypeHandle composed = T.Union(representation, semantic);
+ CHECK(type1->Equals(composed));
+ }
+
+ // Pointwiseness of Union.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ TypeHandle representation1 = T.Representation(type1);
+ TypeHandle semantic1 = T.Semantic(type1);
+ TypeHandle representation2 = T.Representation(type2);
+ TypeHandle semantic2 = T.Semantic(type2);
+ TypeHandle direct_union = T.Union(type1, type2);
+ TypeHandle representation_union =
+ T.Union(representation1, representation2);
+ TypeHandle semantic_union = T.Union(semantic1, semantic2);
+ TypeHandle composed_union =
+ T.Union(representation_union, semantic_union);
+ CHECK(direct_union->Equals(composed_union));
+ }
+ }
+
+ // Pointwiseness of Intersect.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ TypeHandle representation1 = T.Representation(type1);
+ TypeHandle semantic1 = T.Semantic(type1);
+ TypeHandle representation2 = T.Representation(type2);
+ TypeHandle semantic2 = T.Semantic(type2);
+ TypeHandle direct_intersection = T.Intersect(type1, type2);
+ TypeHandle representation_intersection =
+ T.Intersect(representation1, representation2);
+ TypeHandle semantic_intersection = T.Intersect(semantic1, semantic2);
+ TypeHandle composed_intersection =
+ T.Union(representation_intersection, semantic_intersection);
+ CHECK(direct_intersection->Equals(composed_intersection));
+ }
+ }
+
+ // Pointwiseness of Is.
+ for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+ for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+ TypeHandle type1 = *it1;
+ TypeHandle type2 = *it2;
+ TypeHandle representation1 = T.Representation(type1);
+ TypeHandle semantic1 = T.Semantic(type1);
+ TypeHandle representation2 = T.Representation(type2);
+ TypeHandle semantic2 = T.Semantic(type2);
+ bool representation_is = representation1->Is(representation2);
+ bool semantic_is = semantic1->Is(semantic2);
+ bool direct_is = type1->Is(type2);
+ CHECK(direct_is == (semantic_is && representation_is));
+ }
+ }
+ }
+
void Class() {
// Constructor
for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
@@ -294,39 +372,33 @@ struct Tests : Rep {
CHECK(T.Constant(fac->NewNumber(0))->Is(T.UnsignedSmall));
CHECK(T.Constant(fac->NewNumber(1))->Is(T.UnsignedSmall));
CHECK(T.Constant(fac->NewNumber(0x3fffffff))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(-1))->Is(T.NegativeSignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x3fffffff))->Is(T.NegativeSignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x40000000))->Is(T.NegativeSignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-1))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x3fffffff))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x40000000))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.Unsigned31));
+ CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.Unsigned30));
+ CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned31));
+ CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned30));
+ CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.Negative32));
+ CHECK(!T.Constant(fac->NewNumber(-0x40000001))->Is(T.Negative31));
+ CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.Negative32));
+ CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.Negative31));
if (SmiValuesAre31Bits()) {
- CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.NonNegativeSigned32));
CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.NonNegativeSigned32));
CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSigned32));
- CHECK(
- !T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.NegativeSigned32));
- CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))
- ->Is(T.NegativeSignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
+ CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
} else {
CHECK(SmiValuesAre32Bits());
CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.NonNegativeSigned32));
- CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.NonNegativeSigned32));
- CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.NegativeSignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 1))
- ->Is(T.NegativeSignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSigned32));
- CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.NegativeSigned32));
- CHECK(
- T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.NegativeSigned32));
+ CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
+ CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
}
CHECK(T.Constant(fac->NewNumber(0x80000000u))->Is(T.Unsigned32));
- CHECK(!T.Constant(fac->NewNumber(0x80000000u))->Is(T.NonNegativeSigned32));
+ CHECK(!T.Constant(fac->NewNumber(0x80000000u))->Is(T.Unsigned31));
CHECK(T.Constant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned32));
- CHECK(!T.Constant(fac->NewNumber(0xffffffffu))->Is(T.NonNegativeSigned32));
+ CHECK(!T.Constant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned31));
CHECK(T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.PlainNumber));
CHECK(!T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.Integral32));
CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.PlainNumber));
@@ -338,7 +410,8 @@ struct Tests : Rep {
CHECK(T.Constant(fac->NewNumber(10e60))->Is(T.PlainNumber));
CHECK(!T.Constant(fac->NewNumber(10e60))->Is(T.Integral32));
CHECK(T.Constant(fac->NewNumber(-1.0*0.0))->Is(T.MinusZero));
- CHECK(T.Constant(fac->NewNumber(v8::base::OS::nan_value()))->Is(T.NaN));
+ CHECK(T.Constant(fac->NewNumber(std::numeric_limits<double>::quiet_NaN()))
+ ->Is(T.NaN));
CHECK(T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.PlainNumber));
CHECK(!T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.Integral32));
CHECK(T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.PlainNumber));
@@ -349,9 +422,9 @@ struct Tests : Rep {
// Constructor
for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
- i::Handle<i::Object> min = *i;
- i::Handle<i::Object> max = *j;
- if (min->Number() > max->Number()) std::swap(min, max);
+ double min = (*i)->Number();
+ double max = (*j)->Number();
+ if (min > max) std::swap(min, max);
TypeHandle type = T.Range(min, max);
CHECK(type->IsRange());
}
@@ -360,12 +433,12 @@ struct Tests : Rep {
// Range attributes
for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
- i::Handle<i::Object> min = *i;
- i::Handle<i::Object> max = *j;
- if (min->Number() > max->Number()) std::swap(min, max);
+ double min = (*i)->Number();
+ double max = (*j)->Number();
+ if (min > max) std::swap(min, max);
TypeHandle type = T.Range(min, max);
- CHECK(*min == *type->AsRange()->Min());
- CHECK(*max == *type->AsRange()->Max());
+ CHECK(min == type->AsRange()->Min());
+ CHECK(max == type->AsRange()->Max());
}
}
@@ -379,15 +452,15 @@ struct Tests : Rep {
i2 != T.integers.end(); ++i2) {
for (ValueIterator j2 = i2;
j2 != T.integers.end(); ++j2) {
- i::Handle<i::Object> min1 = *i1;
- i::Handle<i::Object> max1 = *j1;
- i::Handle<i::Object> min2 = *i2;
- i::Handle<i::Object> max2 = *j2;
- if (min1->Number() > max1->Number()) std::swap(min1, max1);
- if (min2->Number() > max2->Number()) std::swap(min2, max2);
+ double min1 = (*i1)->Number();
+ double max1 = (*j1)->Number();
+ double min2 = (*i2)->Number();
+ double max2 = (*j2)->Number();
+ if (min1 > max1) std::swap(min1, max1);
+ if (min2 > max2) std::swap(min2, max2);
TypeHandle type1 = T.Range(min1, max1);
TypeHandle type2 = T.Range(min2, max2);
- CHECK(Equal(type1, type2) == (*min1 == *min2 && *max1 == *max2));
+ CHECK(Equal(type1, type2) == (min1 == min2 && max1 == max2));
}
}
}
@@ -606,8 +679,6 @@ struct Tests : Rep {
}
void MinMax() {
- Factory* fac = isolate->factory();
-
// If b is regular numeric bitset, then Range(b->Min(), b->Max())->Is(b).
// TODO(neis): Need to ignore representation for this to be true.
/*
@@ -655,13 +726,12 @@ struct Tests : Rep {
}
}
- // Rangification: If T->Is(Range(-inf,+inf)) and !T->Is(None), then
+ // Rangification: If T->Is(Range(-inf,+inf)) and T is inhabited, then
// T->Is(Range(T->Min(), T->Max())).
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type = *it;
- CHECK(!(type->Is(T.Integer) && !type->Is(T.None)) ||
- type->Is(T.Range(fac->NewNumber(type->Min()),
- fac->NewNumber(type->Max()))));
+ CHECK(!type->Is(T.Integer) || !type->IsInhabited() ||
+ type->Is(T.Range(type->Min(), type->Max())));
}
}
@@ -795,11 +865,12 @@ struct Tests : Rep {
(type1->IsClass() && type2->IsClass()) ||
(type1->IsConstant() && type2->IsConstant()) ||
(type1->IsConstant() && type2->IsRange()) ||
+ (this->IsBitset(type1) && type2->IsRange()) ||
(type1->IsRange() && type2->IsRange()) ||
(type1->IsContext() && type2->IsContext()) ||
(type1->IsArray() && type2->IsArray()) ||
(type1->IsFunction() && type2->IsFunction()) ||
- type1->Equals(T.None));
+ !type1->IsInhabited());
}
}
}
@@ -825,17 +896,15 @@ struct Tests : Rep {
i2 != T.integers.end(); ++i2) {
for (ValueIterator j2 = i2;
j2 != T.integers.end(); ++j2) {
- i::Handle<i::Object> min1 = *i1;
- i::Handle<i::Object> max1 = *j1;
- i::Handle<i::Object> min2 = *i2;
- i::Handle<i::Object> max2 = *j2;
- if (min1->Number() > max1->Number()) std::swap(min1, max1);
- if (min2->Number() > max2->Number()) std::swap(min2, max2);
+ double min1 = (*i1)->Number();
+ double max1 = (*j1)->Number();
+ double min2 = (*i2)->Number();
+ double max2 = (*j2)->Number();
+ if (min1 > max1) std::swap(min1, max1);
+ if (min2 > max2) std::swap(min2, max2);
TypeHandle type1 = T.Range(min1, max1);
TypeHandle type2 = T.Range(min2, max2);
- CHECK(type1->Is(type2) ==
- (min1->Number() >= min2->Number() &&
- max1->Number() <= max2->Number()));
+ CHECK(type1->Is(type2) == (min1 >= min2 && max1 <= max2));
}
}
}
@@ -895,8 +964,8 @@ struct Tests : Rep {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type = *it;
if (type->IsConstant() && IsInteger(*type->AsConstant()->Value())) {
- CHECK(type->Is(
- T.Range(type->AsConstant()->Value(), type->AsConstant()->Value())));
+ CHECK(type->Is(T.Range(type->AsConstant()->Value()->Number(),
+ type->AsConstant()->Value()->Number())));
}
}
@@ -907,8 +976,8 @@ struct Tests : Rep {
TypeHandle type2 = *it2;
if (type1->IsConstant() && type2->IsRange() && type1->Is(type2)) {
double x = type1->AsConstant()->Value()->Number();
- double min = type2->AsRange()->Min()->Number();
- double max = type2->AsRange()->Max()->Number();
+ double min = type2->AsRange()->Min();
+ double max = type2->AsRange()->Max();
CHECK(IsInteger(x) && min <= x && x <= max);
}
}
@@ -933,7 +1002,7 @@ struct Tests : Rep {
CheckSub(T.SignedSmall, T.Number);
CheckSub(T.Signed32, T.Number);
- CheckSub(T.SignedSmall, T.Signed32);
+ CheckSubOrEqual(T.SignedSmall, T.Signed32);
CheckUnordered(T.SignedSmall, T.MinusZero);
CheckUnordered(T.Signed32, T.Unsigned32);
@@ -1305,7 +1374,7 @@ struct Tests : Rep {
CheckDisjoint(T.SignedFunction1, T.MethodFunction);
CheckOverlap(T.ObjectConstant1, T.ObjectClass); // !!!
CheckOverlap(T.ObjectConstant2, T.ObjectClass); // !!!
- CheckOverlap(T.NumberClass, T.Intersect(T.Number, T.Untagged)); // !!!
+ CheckOverlap(T.NumberClass, T.Intersect(T.Number, T.Tagged)); // !!!
}
void Union1() {
@@ -1477,8 +1546,8 @@ struct Tests : Rep {
CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number);
// Bitset-class
- CheckSub(
- T.Union(T.ObjectClass, T.SignedSmall), T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectClass, T.SignedSmall),
+ T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
@@ -1548,11 +1617,9 @@ struct Tests : Rep {
T.Union(T.ObjectConstant2, T.ObjectConstant1),
T.Union(T.ObjectConstant1, T.ObjectConstant2)),
T.Union(T.ObjectConstant2, T.ObjectConstant1));
- CheckEqual(
- T.Union(
- T.Union(T.Number, T.ArrayClass),
- T.Union(T.SignedSmall, T.Array)),
- T.Union(T.Number, T.Array));
+ CheckEqual(T.Union(T.Union(T.Number, T.ArrayClass),
+ T.Union(T.SignedSmall, T.Array)),
+ T.Union(T.Number, T.Array));
}
void Intersect() {
@@ -1696,24 +1763,24 @@ struct Tests : Rep {
// Bitset-class
CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
- CheckEqual(T.Intersect(T.ObjectClass, T.Array), T.None);
- CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
+ CheckEqual(T.Semantic(T.Intersect(T.ObjectClass, T.Array)), T.None);
+ CheckEqual(T.Semantic(T.Intersect(T.ObjectClass, T.Number)), T.None);
// Bitset-array
CheckEqual(T.Intersect(T.NumberArray, T.Object), T.NumberArray);
- CheckEqual(T.Intersect(T.AnyArray, T.Proxy), T.None);
+ CheckEqual(T.Semantic(T.Intersect(T.AnyArray, T.Proxy)), T.None);
// Bitset-function
CheckEqual(T.Intersect(T.MethodFunction, T.Object), T.MethodFunction);
- CheckEqual(T.Intersect(T.NumberFunction1, T.Proxy), T.None);
+ CheckEqual(T.Semantic(T.Intersect(T.NumberFunction1, T.Proxy)), T.None);
// Bitset-union
CheckEqual(
T.Intersect(T.Object, T.Union(T.ObjectConstant1, T.ObjectClass)),
T.Union(T.ObjectConstant1, T.ObjectClass));
- CHECK(
- !T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number)
- ->IsInhabited());
+ CheckEqual(T.Semantic(T.Intersect(T.Union(T.ArrayClass, T.ObjectConstant1),
+ T.Number)),
+ T.None);
// Class-constant
CHECK(T.Intersect(T.ObjectConstant1, T.ObjectClass)->IsInhabited()); // !!!
@@ -1766,11 +1833,9 @@ struct Tests : Rep {
->IsInhabited()); // !!!
// Union-union
- CheckEqual(
- T.Intersect(
- T.Union(T.Number, T.ArrayClass),
- T.Union(T.SignedSmall, T.Array)),
- T.Union(T.SignedSmall, T.ArrayClass));
+ CheckEqual(T.Intersect(T.Union(T.Number, T.ArrayClass),
+ T.Union(T.SignedSmall, T.Array)),
+ T.Union(T.SignedSmall, T.ArrayClass));
CheckEqual(
T.Intersect(
T.Union(T.Number, T.ObjectClass),
@@ -1849,8 +1914,8 @@ struct Tests : Rep {
TypeHandle type1 = *it1;
if (type1->IsRange()) {
typename Type::RangeType* range = type1->GetRange();
- CHECK(type1->Min() == range->Min()->Number());
- CHECK(type1->Max() == range->Max()->Number());
+ CHECK(type1->Min() == range->Min());
+ CHECK(type1->Max() == range->Max());
}
}
@@ -1862,8 +1927,8 @@ struct Tests : Rep {
if (type1->IsConstant() && type2->IsRange()) {
TypeHandle u = T.Union(type1, type2);
- CHECK(type2->Min() == u->GetRange()->Min()->Number());
- CHECK(type2->Max() == u->GetRange()->Max()->Number());
+ CHECK(type2->Min() == u->GetRange()->Min());
+ CHECK(type2->Max() == u->GetRange()->Max());
}
}
}
@@ -1871,8 +1936,9 @@ struct Tests : Rep {
template<class Type2, class TypeHandle2, class Region2, class Rep2>
void Convert() {
- Types<Type2, TypeHandle2, Region2> T2(
- Rep2::ToRegion(&zone, isolate), isolate);
+ Types<Type2, TypeHandle2, Region2> T2(Rep2::ToRegion(&zone, isolate),
+ isolate,
+ isolate->random_number_generator());
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
TypeHandle type1 = *it;
TypeHandle2 type2 = T2.template Convert<Type>(type1);
@@ -1905,6 +1971,13 @@ TEST(IsSomeType) {
}
+TEST(PointwiseRepresentation) {
+ CcTest::InitializeVM();
+ // ZoneTests().PointwiseRepresentation();
+ HeapTests().PointwiseRepresentation();
+}
+
+
TEST(BitsetType) {
CcTest::InitializeVM();
ZoneTests().Bitset();
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index a12bf47f96..fdcac3af35 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -21,6 +21,33 @@ using namespace v8::internal;
#if (V8_DOUBLE_FIELDS_UNBOXING)
+//
+// Helper functions.
+//
+
+
+static void InitializeVerifiedMapDescriptors(
+ Map* map, DescriptorArray* descriptors,
+ LayoutDescriptor* layout_descriptor) {
+ map->InitializeDescriptors(descriptors, layout_descriptor);
+ CHECK(layout_descriptor->IsConsistentWithMap(map));
+}
+
+
+static Handle<String> MakeString(const char* str) {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ return factory->InternalizeUtf8String(str);
+}
+
+
+static Handle<String> MakeName(const char* str, int suffix) {
+ EmbeddedVector<char, 128> buffer;
+ SNPrintF(buffer, "%s%d", str, suffix);
+ return MakeString(buffer.start());
+}
+
+
static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
if (obj->IsUnboxedDoubleField(field_index)) {
return obj->RawFastDoublePropertyAt(field_index);
@@ -67,11 +94,11 @@ static Handle<DescriptorArray> CreateDescriptorArray(Isolate* isolate,
TestPropertyKind kind = props[i];
if (kind == PROP_CONSTANT) {
- ConstantDescriptor d(name, func, NONE);
+ DataConstantDescriptor d(name, func, NONE);
descriptors->Append(&d);
} else {
- FieldDescriptor f(name, next_field_offset, NONE, representations[kind]);
+ DataDescriptor f(name, next_field_offset, NONE, representations[kind]);
next_field_offset += f.GetDetails().field_width_in_words();
descriptors->Append(&f);
}
@@ -138,8 +165,7 @@ TEST(LayoutDescriptorBasicSlow) {
layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
CHECK_EQ(kSmiValueSize, layout_descriptor->capacity());
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
props[0] = PROP_DOUBLE;
@@ -162,8 +188,7 @@ TEST(LayoutDescriptorBasicSlow) {
for (int i = 1; i < kPropsCount; i++) {
CHECK_EQ(true, layout_descriptor->IsTagged(i));
}
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
{
@@ -182,8 +207,7 @@ TEST(LayoutDescriptorBasicSlow) {
CHECK_EQ(true, layout_descriptor->IsTagged(i));
}
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
// Here we have truly slow layout descriptor, so play with the bits.
CHECK_EQ(true, layout_descriptor->IsTagged(-1));
@@ -200,8 +224,7 @@ TEST(LayoutDescriptorBasicSlow) {
}
CHECK(layout_desc->IsSlowLayout());
CHECK(!layout_desc->IsFastPointerLayout());
-
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ CHECK(layout_descriptor->IsConsistentWithMap(*map));
}
}
@@ -463,16 +486,14 @@ TEST(LayoutDescriptorCreateNewFast) {
Handle<Map> map = Map::Create(isolate, 0);
layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
{
Handle<Map> map = Map::Create(isolate, 1);
layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
{
@@ -484,8 +505,7 @@ TEST(LayoutDescriptorCreateNewFast) {
CHECK_EQ(false, layout_descriptor->IsTagged(1));
CHECK_EQ(true, layout_descriptor->IsTagged(2));
CHECK_EQ(true, layout_descriptor->IsTagged(125));
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
}
@@ -509,16 +529,14 @@ TEST(LayoutDescriptorCreateNewSlow) {
Handle<Map> map = Map::Create(isolate, 0);
layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
{
Handle<Map> map = Map::Create(isolate, 1);
layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
{
@@ -530,8 +548,7 @@ TEST(LayoutDescriptorCreateNewSlow) {
CHECK_EQ(false, layout_descriptor->IsTagged(1));
CHECK_EQ(true, layout_descriptor->IsTagged(2));
CHECK_EQ(true, layout_descriptor->IsTagged(125));
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
}
{
@@ -541,7 +558,7 @@ TEST(LayoutDescriptorCreateNewSlow) {
CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
CHECK(layout_descriptor->IsSlowLayout());
for (int i = 0; i < inobject_properties; i++) {
- // PROP_DOUBLE has index 1 among FIELD properties.
+ // PROP_DOUBLE has index 1 among DATA properties.
const bool tagged = (i % (PROP_KIND_NUMBER - 1)) != 1;
CHECK_EQ(tagged, layout_descriptor->IsTagged(i));
}
@@ -549,8 +566,7 @@ TEST(LayoutDescriptorCreateNewSlow) {
for (int i = inobject_properties; i < kPropsCount; i++) {
CHECK_EQ(true, layout_descriptor->IsTagged(i));
}
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
// Now test LayoutDescriptor::cast_gc_safe().
Handle<LayoutDescriptor> layout_descriptor_copy =
@@ -600,15 +616,15 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
Handle<LayoutDescriptor> layout_descriptor;
TestPropertyKind kind = props[i];
if (kind == PROP_CONSTANT) {
- ConstantDescriptor d(name, func, NONE);
- layout_descriptor = LayoutDescriptor::Append(map, d.GetDetails());
+ DataConstantDescriptor d(name, func, NONE);
+ layout_descriptor = LayoutDescriptor::ShareAppend(map, d.GetDetails());
descriptors->Append(&d);
} else {
- FieldDescriptor f(name, next_field_offset, NONE, representations[kind]);
+ DataDescriptor f(name, next_field_offset, NONE, representations[kind]);
int field_width_in_words = f.GetDetails().field_width_in_words();
next_field_offset += field_width_in_words;
- layout_descriptor = LayoutDescriptor::Append(map, f.GetDetails());
+ layout_descriptor = LayoutDescriptor::ShareAppend(map, f.GetDetails());
descriptors->Append(&f);
int field_index = f.GetDetails().field_index();
@@ -622,7 +638,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
map->InitializeDescriptors(*descriptors, *layout_descriptor);
}
Handle<LayoutDescriptor> layout_descriptor(map->layout_descriptor(), isolate);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ CHECK(layout_descriptor->IsConsistentWithMap(*map));
return layout_descriptor;
}
@@ -736,7 +752,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
CHECK_EQ(*full_layout_descriptor, layout_desc);
} else {
CHECK(!switched_to_slow_mode);
- if (details.type() == FIELD) {
+ if (details.type() == DATA) {
nof++;
int field_index = details.field_index();
int field_width_in_words = details.field_width_in_words();
@@ -749,12 +765,12 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
CHECK(layout_desc->IsTagged(field_index + field_width_in_words));
}
}
- DCHECK(map->layout_descriptor()->IsConsistentWithMap(*map));
+ CHECK(map->layout_descriptor()->IsConsistentWithMap(*map));
}
Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
isolate);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ CHECK(layout_descriptor->IsConsistentWithMap(*map));
return layout_descriptor;
}
@@ -959,8 +975,7 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
map, descriptors, descriptors->number_of_descriptors());
- map->InitializeDescriptors(*descriptors, *layout_descriptor);
- DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+ InitializeVerifiedMapDescriptors(*map, *descriptors, *layout_descriptor);
LayoutDescriptorHelper helper(*map);
bool all_fields_tagged = true;
@@ -971,7 +986,7 @@ static void TestLayoutDescriptorHelper(Isolate* isolate,
int first_non_tagged_field_offset = end_offset;
for (int i = 0; i < number_of_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
+ if (details.type() != DATA) continue;
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
if (!index.is_inobject()) continue;
all_fields_tagged &= !details.representation().IsDouble();
@@ -1096,6 +1111,52 @@ TEST(LayoutDescriptorHelperAllDoubles) {
}
+TEST(LayoutDescriptorSharing) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ Handle<Map> split_map;
+ {
+ Handle<Map> map = Map::Create(isolate, 64);
+ for (int i = 0; i < 32; i++) {
+ Handle<String> name = MakeName("prop", i);
+ map = Map::CopyWithField(map, name, any_type, NONE, Representation::Smi(),
+ INSERT_TRANSITION).ToHandleChecked();
+ }
+ split_map = Map::CopyWithField(map, MakeString("dbl"), any_type, NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ }
+ Handle<LayoutDescriptor> split_layout_descriptor(
+ split_map->layout_descriptor(), isolate);
+ CHECK(split_layout_descriptor->IsConsistentWithMap(*split_map));
+ CHECK(split_layout_descriptor->IsSlowLayout());
+ CHECK(split_map->owns_descriptors());
+
+ Handle<Map> map1 = Map::CopyWithField(split_map, MakeString("foo"), any_type,
+ NONE, Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ CHECK(!split_map->owns_descriptors());
+ CHECK_EQ(*split_layout_descriptor, split_map->layout_descriptor());
+
+ // Layout descriptors should be shared with |split_map|.
+ CHECK(map1->owns_descriptors());
+ CHECK_EQ(*split_layout_descriptor, map1->layout_descriptor());
+ CHECK(map1->layout_descriptor()->IsConsistentWithMap(*map1));
+
+ Handle<Map> map2 = Map::CopyWithField(split_map, MakeString("bar"), any_type,
+ NONE, Representation::Tagged(),
+ INSERT_TRANSITION).ToHandleChecked();
+
+ // Layout descriptors should not be shared with |split_map|.
+ CHECK(map2->owns_descriptors());
+ CHECK_NE(*split_layout_descriptor, map2->layout_descriptor());
+ CHECK(map2->layout_descriptor()->IsConsistentWithMap(*map2));
+}
+
+
TEST(StoreBufferScanOnScavenge) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -1159,4 +1220,89 @@ TEST(StoreBufferScanOnScavenge) {
CHECK_EQ(boom_value, GetDoubleFieldValue(*obj, field_index));
}
+
+static int LenFromSize(int size) {
+ return (size - FixedArray::kHeaderSize) / kPointerSize;
+}
+
+
+TEST(WriteBarriersInCopyJSObject) {
+ FLAG_max_semi_space_size = 1; // Ensure new space is not growing.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ TestHeap* heap = CcTest::test_heap();
+
+ v8::HandleScope scope(CcTest::isolate());
+
+ // The plan: create JSObject which contains unboxed double value that looks
+ // like a reference to an object in new space.
+ // Then clone this object (forcing it to go into old space) and check
+ // that the value of the unboxed double property of the cloned object has
+ // was not corrupted by GC.
+
+ // Step 1: prepare a map for the object. We add unboxed double property to it.
+ // Create a map with single inobject property.
+ Handle<Map> my_map = Map::Create(isolate, 1);
+ Handle<String> name = isolate->factory()->InternalizeUtf8String("foo");
+ my_map = Map::CopyWithField(my_map, name, HeapType::Any(isolate), NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ my_map->set_pre_allocated_property_fields(1);
+ int n_properties = my_map->InitialPropertiesLength();
+ CHECK_GE(n_properties, 0);
+
+ int object_size = my_map->instance_size();
+
+ // Step 2: allocate a lot of objects so to almost fill new space: we need
+ // just enough room to allocate JSObject and thus fill the newspace.
+
+ int allocation_amount =
+ Min(FixedArray::kMaxSize, Page::kMaxRegularHeapObjectSize + kPointerSize);
+ int allocation_len = LenFromSize(allocation_amount);
+ NewSpace* new_space = heap->new_space();
+ Address* top_addr = new_space->allocation_top_address();
+ Address* limit_addr = new_space->allocation_limit_address();
+ while ((*limit_addr - *top_addr) > allocation_amount) {
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
+ CHECK(new_space->Contains(array));
+ }
+
+ // Step 3: now allocate fixed array and JSObject to fill the whole new space.
+ int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
+ int fixed_array_len = LenFromSize(to_fill);
+ CHECK(fixed_array_len < FixedArray::kMaxLength);
+
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
+ CHECK(new_space->Contains(array));
+
+ Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
+ CHECK(new_space->Contains(object));
+ JSObject* jsobject = JSObject::cast(object);
+ CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
+ CHECK_EQ(0, jsobject->properties()->length());
+
+ // Construct a double value that looks like a pointer to the new space object
+ // and store it into the obj.
+ Address fake_object = reinterpret_cast<Address>(array) + kPointerSize;
+ double boom_value = bit_cast<double>(fake_object);
+ FieldIndex index = FieldIndex::ForDescriptor(*my_map, 0);
+ jsobject->RawFastDoublePropertyAtPut(index, boom_value);
+
+ CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
+
+ // Step 4: clone jsobject, but force always allocate first to create a clone
+ // in old pointer space.
+ AlwaysAllocateScope aa_scope(isolate);
+ Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
+ Handle<JSObject> clone(JSObject::cast(clone_obj));
+ CHECK(heap->old_pointer_space()->Contains(clone->address()));
+
+ CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
+
+ // The value in cloned object should not be corrupted by GC.
+ CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
+}
+
#endif
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index 302539a96d..15b800dc05 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -143,7 +143,7 @@ TEST(UniqueSet_Add) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -170,7 +170,7 @@ TEST(UniqueSet_Remove) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -210,7 +210,7 @@ TEST(UniqueSet_Contains) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -241,7 +241,7 @@ TEST(UniqueSet_At) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -278,7 +278,7 @@ TEST(UniqueSet_Equals) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -316,7 +316,7 @@ TEST(UniqueSet_IsSubset1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -351,7 +351,7 @@ TEST(UniqueSet_IsSubset2) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -394,7 +394,7 @@ TEST(UniqueSet_IsSubsetExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(isolate);
+ Zone zone;
Unique<String> elements[] = {
A, B, C, D, E, F, G
@@ -417,7 +417,7 @@ TEST(UniqueSet_Intersect1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -458,7 +458,7 @@ TEST(UniqueSet_IntersectExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(isolate);
+ Zone zone;
Unique<String> elements[] = {
A, B, C, D, E, F, G
@@ -485,7 +485,7 @@ TEST(UniqueSet_Union1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(isolate);
+ Zone zone;
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -526,7 +526,7 @@ TEST(UniqueSet_UnionExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(isolate);
+ Zone zone;
Unique<String> elements[] = {
A, B, C, D, E, F, G
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 05a12f5c23..46adc0b23d 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -72,7 +72,7 @@ TEST(Utils1) {
CHECK_EQ(INT_MAX, FastD2IChecked(1.0e100));
CHECK_EQ(INT_MIN, FastD2IChecked(-1.0e100));
- CHECK_EQ(INT_MIN, FastD2IChecked(v8::base::OS::nan_value()));
+ CHECK_EQ(INT_MIN, FastD2IChecked(std::numeric_limits<double>::quiet_NaN()));
}
diff --git a/deps/v8/test/cctest/test-version.cc b/deps/v8/test/cctest/test-version.cc
index 231451d11a..7de4467d6b 100644
--- a/deps/v8/test/cctest/test-version.cc
+++ b/deps/v8/test/cctest/test-version.cc
@@ -59,17 +59,17 @@ static void CheckVersion(int major, int minor, int build,
// Test version without specific SONAME.
SetVersion(major, minor, build, patch, candidate, "");
Version::GetString(version_str);
- CHECK_EQ(expected_version_string, version_str.start());
+ CHECK_EQ(0, strcmp(expected_version_string, version_str.start()));
Version::GetSONAME(soname_str);
- CHECK_EQ(expected_generic_soname, soname_str.start());
+ CHECK_EQ(0, strcmp(expected_generic_soname, soname_str.start()));
// Test version with specific SONAME.
const char* soname = "libv8.so.1";
SetVersion(major, minor, build, patch, candidate, soname);
Version::GetString(version_str);
- CHECK_EQ(expected_version_string, version_str.start());
+ CHECK_EQ(0, strcmp(expected_version_string, version_str.start()));
Version::GetSONAME(soname_str);
- CHECK_EQ(soname, soname_str.start());
+ CHECK_EQ(0, strcmp(soname, soname_str.start()));
}
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 6c19cb8e47..04f41b9aee 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -42,10 +42,7 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
static Handle<JSWeakMap> AllocateJSWeakMap(Isolate* isolate) {
- Factory* factory = isolate->factory();
- Handle<Map> map = factory->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
- Handle<JSObject> weakmap_obj = factory->NewJSObjectFromMap(map);
- Handle<JSWeakMap> weakmap(JSWeakMap::cast(*weakmap_obj));
+ Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
// Do not leak handles for the hash table, it would make entries strong.
{
HandleScope scope(isolate);
@@ -55,16 +52,6 @@ static Handle<JSWeakMap> AllocateJSWeakMap(Isolate* isolate) {
return weakmap;
}
-static void PutIntoWeakMap(Handle<JSWeakMap> weakmap,
- Handle<JSObject> key,
- Handle<Object> value) {
- Handle<ObjectHashTable> table = ObjectHashTable::Put(
- Handle<ObjectHashTable>(ObjectHashTable::cast(weakmap->table())),
- Handle<JSObject>(JSObject::cast(*key)),
- value);
- weakmap->set_table(*table);
-}
-
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(
const v8::WeakCallbackData<v8::Value, void>& data) {
@@ -102,8 +89,9 @@ TEST(Weakness) {
HandleScope scope(isolate);
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
- PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), object);
- PutIntoWeakMap(weakmap, object, Handle<Smi>(Smi::FromInt(23), isolate));
+ Handle<Smi> smi(Smi::FromInt(23), isolate);
+ Runtime::WeakCollectionSet(weakmap, key, object);
+ Runtime::WeakCollectionSet(weakmap, object, smi);
}
CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
@@ -157,7 +145,8 @@ TEST(Shrinking) {
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
- PutIntoWeakMap(weakmap, object, Handle<Smi>(Smi::FromInt(i), isolate));
+ Handle<Smi> smi(Smi::FromInt(i), isolate);
+ Runtime::WeakCollectionSet(weakmap, object, smi);
}
}
@@ -204,7 +193,7 @@ TEST(Regress2060a) {
Handle<JSObject> object = factory->NewJSObject(function, TENURED);
CHECK(!heap->InNewSpace(object->address()));
CHECK(!first_page->Contains(object->address()));
- PutIntoWeakMap(weakmap, key, object);
+ Runtime::WeakCollectionSet(weakmap, key, object);
}
}
@@ -244,9 +233,8 @@ TEST(Regress2060b) {
}
Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
for (int i = 0; i < 32; i++) {
- PutIntoWeakMap(weakmap,
- keys[i],
- Handle<Smi>(Smi::FromInt(i), isolate));
+ Handle<Smi> smi(Smi::FromInt(i), isolate);
+ Runtime::WeakCollectionSet(weakmap, keys[i], smi);
}
// Force compacting garbage collection. The subsequent collections are used
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 299cc92e9b..f08a99bcbf 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -55,16 +55,6 @@ static Handle<JSWeakSet> AllocateJSWeakSet(Isolate* isolate) {
return weakset;
}
-static void PutIntoWeakSet(Handle<JSWeakSet> weakset,
- Handle<JSObject> key,
- Handle<Object> value) {
- Handle<ObjectHashTable> table = ObjectHashTable::Put(
- Handle<ObjectHashTable>(ObjectHashTable::cast(weakset->table())),
- Handle<JSObject>(JSObject::cast(*key)),
- value);
- weakset->set_table(*table);
-}
-
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(
const v8::WeakCallbackData<v8::Value, void>& data) {
@@ -100,9 +90,8 @@ TEST(WeakSet_Weakness) {
// Put entry into weak set.
{
HandleScope scope(isolate);
- PutIntoWeakSet(weakset,
- Handle<JSObject>(JSObject::cast(*key)),
- Handle<Smi>(Smi::FromInt(23), isolate));
+ Handle<Smi> smi(Smi::FromInt(23), isolate);
+ Runtime::WeakCollectionSet(weakset, key, smi);
}
CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
@@ -156,7 +145,8 @@ TEST(WeakSet_Shrinking) {
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
- PutIntoWeakSet(weakset, object, Handle<Smi>(Smi::FromInt(i), isolate));
+ Handle<Smi> smi(Smi::FromInt(i), isolate);
+ Runtime::WeakCollectionSet(weakset, object, smi);
}
}
@@ -203,7 +193,7 @@ TEST(WeakSet_Regress2060a) {
Handle<JSObject> object = factory->NewJSObject(function, TENURED);
CHECK(!heap->InNewSpace(object->address()));
CHECK(!first_page->Contains(object->address()));
- PutIntoWeakSet(weakset, key, object);
+ Runtime::WeakCollectionSet(weakset, key, object);
}
}
@@ -243,9 +233,8 @@ TEST(WeakSet_Regress2060b) {
}
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
for (int i = 0; i < 32; i++) {
- PutIntoWeakSet(weakset,
- keys[i],
- Handle<Smi>(Smi::FromInt(i), isolate));
+ Handle<Smi> smi(Smi::FromInt(i), isolate);
+ Runtime::WeakCollectionSet(weakset, keys[i], smi);
}
// Force compacting garbage collection. The subsequent collections are used
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index 0a1ff87d1c..a95532f931 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -28,6 +28,7 @@
#include "test/cctest/trace-extension.h"
#include "src/sampler.h"
+#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -97,6 +98,11 @@ void TraceExtension::DoTrace(Address fp) {
void TraceExtension::Trace(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ i::VMState<EXTERNAL> state(isolate);
+ Address address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(&TraceExtension::Trace));
+ i::ExternalCallbackScope call_scope(isolate, address);
DoTrace(GetFP(args));
}
@@ -114,19 +120,24 @@ static void DoTraceHideCEntryFPAddress(Address fp) {
void TraceExtension::JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ i::VMState<EXTERNAL> state(isolate);
+ Address address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(&TraceExtension::JSTrace));
+ i::ExternalCallbackScope call_scope(isolate, address);
DoTraceHideCEntryFPAddress(GetFP(args));
}
Address TraceExtension::GetJsEntrySp() {
- CHECK_NE(NULL, CcTest::i_isolate()->thread_local_top());
+ CHECK(CcTest::i_isolate()->thread_local_top());
return CcTest::i_isolate()->js_entry_sp();
}
void TraceExtension::JSEntrySP(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK_NE(0, GetJsEntrySp());
+ CHECK(GetJsEntrySp());
}
@@ -134,7 +145,7 @@ void TraceExtension::JSEntrySPLevel2(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
const Address js_entry_sp = GetJsEntrySp();
- CHECK_NE(0, js_entry_sp);
+ CHECK(js_entry_sp);
CompileRun("js_entry_sp();");
CHECK_EQ(js_entry_sp, GetJsEntrySp());
}
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/cctest/types-fuzz.h
index 4eac64c838..6864cd2df1 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/cctest/types-fuzz.h
@@ -28,6 +28,7 @@
#ifndef V8_TEST_CCTEST_TYPES_H_
#define V8_TEST_CCTEST_TYPES_H_
+#include "src/base/utils/random-number-generator.h"
#include "src/v8.h"
namespace v8 {
@@ -37,14 +38,17 @@ namespace internal {
template<class Type, class TypeHandle, class Region>
class Types {
public:
- Types(Region* region, Isolate* isolate)
- : region_(region), rng_(isolate->random_number_generator()) {
+ Types(Region* region, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
+ : region_(region), rng_(rng) {
#define DECLARE_TYPE(name, value) \
name = Type::name(region); \
types.push_back(name);
PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
+ SignedSmall = Type::SignedSmall(region);
+ UnsignedSmall = Type::UnsignedSmall(region);
+
object_map = isolate->factory()->NewMap(
JS_OBJECT_TYPE, JSObject::kHeaderSize);
array_map = isolate->factory()->NewMap(
@@ -98,8 +102,7 @@ class Types {
if (!IsMinusZero(x)) integers.push_back(isolate->factory()->NewNumber(x));
}
- Integer = Type::Range(isolate->factory()->NewNumber(-V8_INFINITY),
- isolate->factory()->NewNumber(+V8_INFINITY), region);
+ Integer = Type::Range(-V8_INFINITY, +V8_INFINITY, region);
NumberArray = Type::Array(Number, region);
StringArray = Type::Array(String, region);
@@ -131,6 +134,12 @@ class Types {
PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
+#define DECLARE_TYPE(name, value) TypeHandle Mask##name##ForTesting;
+ MASK_BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+ TypeHandle SignedSmall;
+ TypeHandle UnsignedSmall;
+
TypeHandle ObjectClass;
TypeHandle ArrayClass;
TypeHandle NumberClass;
@@ -179,7 +188,7 @@ class Types {
return Type::Constant(value, region_);
}
- TypeHandle Range(Handle<i::Object> min, Handle<i::Object> max) {
+ TypeHandle Range(double min, double max) {
return Type::Range(min, max, region_);
}
@@ -208,10 +217,19 @@ class Types {
TypeHandle Union(TypeHandle t1, TypeHandle t2) {
return Type::Union(t1, t2, region_);
}
+
TypeHandle Intersect(TypeHandle t1, TypeHandle t2) {
return Type::Intersect(t1, t2, region_);
}
+ TypeHandle Representation(TypeHandle t) {
+ return Type::Representation(t, region_);
+ }
+
+ // TypeHandle Semantic(TypeHandle t) { return Intersect(t,
+ // MaskSemanticForTesting); }
+ TypeHandle Semantic(TypeHandle t) { return Type::Semantic(t, region_); }
+
template<class Type2, class TypeHandle2>
TypeHandle Convert(TypeHandle2 t) {
return Type::template Convert<Type2>(t, region_);
@@ -258,9 +276,9 @@ class Types {
case 3: { // range
int i = rng_->NextInt(static_cast<int>(integers.size()));
int j = rng_->NextInt(static_cast<int>(integers.size()));
- i::Handle<i::Object> min = integers[i];
- i::Handle<i::Object> max = integers[j];
- if (min->Number() > max->Number()) std::swap(min, max);
+ double min = integers[i]->Number();
+ double max = integers[j]->Number();
+ if (min > max) std::swap(min, max);
return Type::Range(min, max, region_);
}
case 4: { // context
diff --git a/deps/v8/test/js-perf-test/Classes/super.js b/deps/v8/test/js-perf-test/Classes/super.js
index a9ec766688..0c12b8df00 100644
--- a/deps/v8/test/js-perf-test/Classes/super.js
+++ b/deps/v8/test/js-perf-test/Classes/super.js
@@ -10,39 +10,37 @@ var SuperBenchmark = new BenchmarkSuite('Super', [100], [
]);
-function Base() { }
-Base.prototype = {
- constructor: Base,
+class Base {
+ constructor() {}
get x() {
return this._x++;
- },
+ }
set x(v) {
this._x += v;
return this._x;
}
+ f() {
+ return this._x++;
+ }
}
-Base.prototype.f = function() {
- return this._x++;
-}.toMethod(Base.prototype);
-function Derived() {
- this._x = 1;
+class Derived extends Base {
+ constructor() {
+ super();
+ this._x = 1;
+ }
+ SuperCall() {
+ return super.f();
+ }
+ GetterCall() {
+ return super.x;
+ }
+ SetterCall() {
+ return super.x = 5;
+ }
}
-Derived.prototype = Object.create(Base.prototype);
-Object.setPrototypeOf(Derived, Base);
-
-Derived.prototype.SuperCall = function() {
- return super.f();
-}.toMethod(Derived.prototype);
-
-Derived.prototype.GetterCall = function() {
- return super.x;
-}.toMethod(Derived.prototype);
-Derived.prototype.SetterCall = function() {
- return super.x = 5;
-}.toMethod(Derived.prototype);
var derived = new Derived();
diff --git a/deps/v8/test/message/super-constructor-extra-statement.out b/deps/v8/test/message/super-constructor-extra-statement.out
index cbe1e0704f..0faa3bea0d 100644
--- a/deps/v8/test/message/super-constructor-extra-statement.out
+++ b/deps/v8/test/message/super-constructor-extra-statement.out
@@ -1,8 +1,8 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:10: TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
- var x;
-
-TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
- at *%(basename)s:15:9
+
+*%(basename)s:11: SyntaxError: 'super' keyword unexpected here
+ super(x);
+ ^^^^^
+SyntaxError: 'super' keyword unexpected here
diff --git a/deps/v8/test/message/super-constructor.out b/deps/v8/test/message/super-constructor.out
index bc3a699dbe..3fa546bd45 100644
--- a/deps/v8/test/message/super-constructor.out
+++ b/deps/v8/test/message/super-constructor.out
@@ -1,8 +1,7 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:10: TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
+*%(basename)s:10: SyntaxError: 'super' keyword unexpected here
super(this.x);
-
-TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
- at *%(basename)s:14:9
+ ^^^^^
+SyntaxError: 'super' keyword unexpected here
diff --git a/deps/v8/test/message/super-in-function.js b/deps/v8/test/message/super-in-function.js
new file mode 100644
index 0000000000..edaa0e4ead
--- /dev/null
+++ b/deps/v8/test/message/super-in-function.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+'use strict';
+
+function f() {
+ super.x();
+}
diff --git a/deps/v8/test/message/super-in-function.out b/deps/v8/test/message/super-in-function.out
new file mode 100644
index 0000000000..19f8bf067c
--- /dev/null
+++ b/deps/v8/test/message/super-in-function.out
@@ -0,0 +1,7 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: SyntaxError: 'super' keyword unexpected here
+ super.x();
+ ^^^^^
+SyntaxError: 'super' keyword unexpected here
diff --git a/deps/v8/test/mjsunit/accessors-no-prototype.js b/deps/v8/test/mjsunit/accessors-no-prototype.js
new file mode 100644
index 0000000000..9c429fc48a
--- /dev/null
+++ b/deps/v8/test/mjsunit/accessors-no-prototype.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+(function TestGetter() {
+ var o = {
+ get x() {}
+ };
+ var desc = Object.getOwnPropertyDescriptor(o, 'x');
+ assertEquals('function', typeof desc.get);
+ assertFalse('prototype' in desc.get);
+
+ assertThrows(function() {
+ new desc.get();
+ }, TypeError);
+})();
+
+
+(function TestSetter() {
+ var o = {
+ set x(_) {}
+ };
+ var desc = Object.getOwnPropertyDescriptor(o, 'x');
+ assertEquals('function', typeof desc.set);
+ assertFalse('prototype' in desc.set);
+
+ assertThrows(function() {
+ new desc.set();
+ }, TypeError);
+})();
+
+
+(function TestBoth() {
+ var o = {
+ get x() {},
+ set x(_) {}
+ };
+ var desc = Object.getOwnPropertyDescriptor(o, 'x');
+ assertEquals('function', typeof desc.get);
+ assertEquals('function', typeof desc.set);
+ assertFalse('prototype' in desc.get);
+ assertFalse('prototype' in desc.set);
+
+ assertThrows(function() {
+ new desc.get();
+ }, TypeError);
+ assertThrows(function() {
+ new desc.set();
+ }, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/array-push12.js b/deps/v8/test/mjsunit/array-push12.js
new file mode 100644
index 0000000000..f4c15b484b
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-push12.js
@@ -0,0 +1,23 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [];
+for (var i = -20; i < 0; ++i) {
+ a[i] = 0;
+}
+
+function g() {
+ [].push.apply(a, arguments);
+}
+
+function f() {
+ g();
+}
+
+g();
+g();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/asm/int32modb.js b/deps/v8/test/mjsunit/asm/int32modb.js
new file mode 100644
index 0000000000..5081b49ae5
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/int32modb.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+var mod = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ function mod(dividend, divisor) {
+ dividend = dividend|0;
+ divisor = divisor|0;
+ return (dividend % divisor) | 0;
+ }
+ return { mod: mod };
+})(stdlib, foreign, heap).mod;
+
+var divisors = [-2147483648, -32 * 1024, -1000, -16, -7, -2, -1, 0,
+ 1, 3, 4, 10, 64, 99, 1023, 1024, 2147483647];
+for (var i = 0; i < divisors.length; i++) {
+ var divisor = divisors[i];
+ for (var dividend = -2147483648; dividend < 2147483648; dividend += 3999773) {
+ assertEquals((dividend % divisor) | 0, mod(dividend, divisor));
+ }
+}
diff --git a/deps/v8/test/mjsunit/asm/redundancy1.js b/deps/v8/test/mjsunit/asm/redundancy1.js
new file mode 100644
index 0000000000..c7e0e37453
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/redundancy1.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-splitting
+
+function module(stdlib, foreign, heap) {
+ "use asm";
+ function foo(i) {
+ var j = 0;
+ i = i|0;
+ if (i < 0) {
+ j = i+1|0;
+ }
+ if (i > 0) {
+ j = i+1|0;
+ }
+ return j;
+ }
+ return { foo: foo };
+}
+
+var foo = module(this, {}, new ArrayBuffer(64*1024)).foo;
+assertEquals(0, foo(0));
+assertEquals(0, foo(-1));
+assertEquals(12, foo(11));
diff --git a/deps/v8/test/mjsunit/asm/redundancy2.js b/deps/v8/test/mjsunit/asm/redundancy2.js
new file mode 100644
index 0000000000..95a55b533e
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/redundancy2.js
@@ -0,0 +1,29 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-splitting
+
+function module(stdlib, foreign, heap) {
+ "use asm";
+ function foo(i) {
+ var j = 0;
+ i = i|0;
+ switch (i) {
+ case 0:
+ j = i+1|0;
+ break;
+ case 1:
+ j = i+1|0;
+ break;
+ default:
+ j = i;
+ break;
+ }
+ return j;
+ }
+ return { foo: foo };
+}
+
+var foo = module(this, {}, new ArrayBuffer(64*1024)).foo;
+print(foo(1));
diff --git a/deps/v8/test/mjsunit/asm/switch.js b/deps/v8/test/mjsunit/asm/switch.js
new file mode 100644
index 0000000000..5cb6329e43
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/switch.js
@@ -0,0 +1,120 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+
+var switch1 = (function(stdlib, foreign, heap) {
+ "use asm";
+ function switch1(i) {
+ i = i|0;
+ switch (i) {
+ case 0: return 1;
+ case 1: return 2;
+ default: return i|0;
+ }
+ }
+ return { switch1: switch1 };
+})(stdlib, foreign, heap).switch1;
+
+assertEquals(1, switch1(0));
+assertEquals(2, switch1(1));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(i, switch1(i));
+}
+
+
+var switch2 = (function(stdlib, foreign, heap) {
+ "use asm";
+ function switch2(i) {
+ i = i|0;
+ var j = 0;
+ switch (i) {
+ case 0: j = 1; break;
+ case 1: j = 2; break;
+ case 2: j = 3; break;
+ default: j = i|0; break;
+ }
+ return j|0;
+ }
+ return { switch2: switch2 };
+})(stdlib, foreign, heap).switch2;
+
+assertEquals(1, switch2(0));
+assertEquals(2, switch2(1));
+assertEquals(3, switch2(2));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(i, switch2(i));
+}
+
+
+var switch3 = (function(stdlib, foreign, heap) {
+ "use asm";
+ function switch3(i) {
+ i = i|0;
+ var j = 0;
+ switch (i) {
+ case 0:
+ case 1: j = 1; break;
+ case 2:
+ case 3: j = 2; break;
+ case 4:
+ case 5: j = 3; break;
+ default: j = 0; break;
+ }
+ return j|0;
+ }
+ return { switch3: switch3 };
+})(stdlib, foreign, heap).switch3;
+
+assertEquals(1, switch3(0));
+assertEquals(1, switch3(1));
+assertEquals(2, switch3(2));
+assertEquals(2, switch3(3));
+assertEquals(3, switch3(4));
+assertEquals(3, switch3(5));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(0, switch3(i));
+}
+
+
+var switch4 = (function(stdlib, foreign, heap) {
+ "use asm";
+ function switch4(i) {
+ i = i|0;
+ switch (i) {
+ case -1:
+ case 1:
+ return 0;
+
+ case -2:
+ case 2:
+ return 1;
+
+ case -3:
+ case 3:
+ return 2;
+
+ case -8:
+ case 8:
+ return 3;
+
+ default:
+ return 4;
+ }
+ }
+ return { switch4: switch4 };
+})(stdlib, foreign, heap).switch4;
+
+assertEquals(4, switch4(0));
+assertEquals(0, switch4(-1));
+assertEquals(0, switch4(1));
+assertEquals(1, switch4(-2));
+assertEquals(1, switch4(2));
+assertEquals(3, switch4(-8));
+assertEquals(3, switch4(8));
+assertEquals(4, switch4(-123456789));
+assertEquals(4, switch4(123456789));
diff --git a/deps/v8/test/mjsunit/big-array-literal.js b/deps/v8/test/mjsunit/big-array-literal.js
index 401807f684..7e19c0a2dc 100644
--- a/deps/v8/test/mjsunit/big-array-literal.js
+++ b/deps/v8/test/mjsunit/big-array-literal.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// On MacOS X 10.7.5, this test needs a stack size of at least 788 kBytes.
+// On PPC64, this test needs a stack size of at least 698 kBytes.
// Flags: --stack-size=800
// Flags: --turbo-deoptimization
diff --git a/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js b/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
new file mode 100644
index 0000000000..d4beff9f6e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-filter=*
+
+function foo() {
+ with ({ value:"fooed" }) { return value; }
+}
+
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("fooed", foo());
+assertOptimized(foo);
+
+function bar() {
+ with ({ value:"bared" }) { return value; }
+}
+
+assertEquals("bared", bar());
+%OptimizeFunctionOnNextCall(bar);
+assertEquals("bared", bar());
+assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/compiler/opt-next-call.js b/deps/v8/test/mjsunit/compiler/opt-next-call.js
index 6366c7d72e..3d7e74f626 100644
--- a/deps/v8/test/mjsunit/compiler/opt-next-call.js
+++ b/deps/v8/test/mjsunit/compiler/opt-next-call.js
@@ -11,3 +11,12 @@ function foo() {
%OptimizeFunctionOnNextCall(foo);
assertEquals("fooed", foo());
assertOptimized(foo);
+
+function bar() {
+ return "bared";
+}
+
+assertEquals("bared", bar());
+%OptimizeFunctionOnNextCall(bar);
+assertEquals("bared", bar());
+assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
index 9c756aafa7..f3ff6beb05 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
@@ -251,9 +251,7 @@ function osr_inner(t, limit) {
if (t.hasOwnProperty(x)) {
for (var i = 0; i < t[x].length; i++) {
r += t[x][i];
- if (i === limit) {
- %OptimizeFunctionOnNextCall(osr_inner, "osr");
- }
+ if (i === limit) %OptimizeOsr();
}
r += x;
}
@@ -267,9 +265,7 @@ function osr_outer(t, osr_after) {
for (var i = 0; i < t[x].length; i++) {
r += t[x][i];
}
- if (x === osr_after) {
- %OptimizeFunctionOnNextCall(osr_outer, "osr");
- }
+ if (x === osr_after) %OptimizeOsr();
r += x;
}
return r;
@@ -279,9 +275,7 @@ function osr_outer_and_deopt(t, osr_after) {
var r = 1;
for (var x in t) {
r += x;
- if (x == osr_after) {
- %OptimizeFunctionOnNextCall(osr_outer_and_deopt, "osr");
- }
+ if (x == osr_after) %OptimizeOsr();
}
return r;
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-alignment.js b/deps/v8/test/mjsunit/compiler/osr-alignment.js
index 30d72d0614..085d6c4d68 100644
--- a/deps/v8/test/mjsunit/compiler/osr-alignment.js
+++ b/deps/v8/test/mjsunit/compiler/osr-alignment.js
@@ -25,37 +25,40 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
function f1() {
var sum = 0;
- for (var i = 0; i < 1000000; i++) {
+ for (var i = 0; i < 1000; i++) {
var x = i + 2;
var y = x + 5;
var z = y + 3;
sum += z;
+ if (i == 18) %OptimizeOsr();
}
return sum;
}
function f2() {
var sum = 0;
- for (var i = 0; i < 1000000; i++) {
+ for (var i = 0; i < 1000; i++) {
var x = i + 2;
var y = x + 5;
var z = y + 3;
sum += z;
+ if (i == 19) %OptimizeOsr();
}
return sum;
}
function f3() {
var sum = 0;
- for (var i = 0; i < 1000000; i++) {
+ for (var i = 0; i < 1000; i++) {
var x = i + 2;
var y = x + 5;
var z = y + 3;
sum += z;
+ if (i == 20) %OptimizeOsr();
}
return sum;
}
@@ -63,21 +66,21 @@ function f3() {
function test1() {
var j = 11;
for (var i = 0; i < 2; i++) {
- assertEquals(500009500000, f1());
+ assertEquals(509500, f1());
}
}
function test2() {
for (var i = 0; i < 2; i++) {
var j = 11, k = 12;
- assertEquals(500009500000, f2());
+ assertEquals(509500, f2());
}
}
function test3() {
for (var i = 0; i < 2; i++) {
var j = 11, k = 13, m = 14;
- assertEquals(500009500000, f3());
+ assertEquals(509500, f3());
}
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-backedges1.js b/deps/v8/test/mjsunit/compiler/osr-backedges1.js
new file mode 100644
index 0000000000..d415f4a107
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-backedges1.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function foo(a) {
+ var i = a | 0;
+ while (true) {
+ if (i == 0) { i = 1; continue; }
+ if (i == 1) { i = 2; continue; }
+ if (i == 2) { i = 3; continue; }
+ if (i == 3) { i = 4; continue; }
+ if (i == 4) { i = 5; continue; }
+ if (i == 5) { i = 6; continue; }
+ if (i == 6) { i = 7; continue; }
+ if (i == 7) { i = 8; continue; }
+ for (var j = 0; j < 10; j++) { if (i == 5) %OptimizeOsr(); }
+ break;
+ }
+ return j;
+}
+
+function test(func, tv, fv) {
+ assertEquals(tv, func(0));
+ assertEquals(tv, func(0));
+ assertEquals(fv, func(9));
+ assertEquals(fv, func(9));
+}
+
+test(foo, 10, 10);
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
new file mode 100644
index 0000000000..df4076c411
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+"use strict";
+
+function foo() {
+ var result;
+ {
+ let sum = 0;
+ for (var i = 0; i < 100; i++) {
+ if (i == 50) %OptimizeOsr();
+ sum += i;
+ }
+ result = ret;
+ function ret() {
+ return sum;
+ }
+ }
+ return result;
+}
+
+assertEquals(4950, foo()());
+assertEquals(4950, foo()());
+assertEquals(4950, foo()());
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
new file mode 100644
index 0000000000..923c72f422
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+"use strict";
+
+function foo() {
+ var result = new Array();
+ var out;
+ {
+ let sum = 0;
+ for (var i = 0; i < 10; i++) {
+ {
+ let x = i;
+ if (i == 5) %OptimizeOsr();
+ sum += i;
+ result.push(function() { return x; });
+ }
+ }
+ out = sum;
+ }
+ result.push(out);
+ return result;
+}
+
+
+function check() {
+ var r = foo();
+ assertEquals(45, r.pop());
+ for (var i = 9; i >= 0; i--) {
+ assertEquals(i, r.pop()());
+ }
+ assertEquals(0, r.length);
+}
+
+check();
+check();
+check();
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope.js b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
new file mode 100644
index 0000000000..0d78cdcb64
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+"use strict";
+
+function nest(body, name, depth) {
+ var header = "";
+ for (var i = 0; i < depth; i++) {
+ var x = "x" + (i + 1);
+ header += " for(var " + x + " = 0; " + x + " < 2; " + x + " = " + x + " + 1 | 0) {\n";
+ body = body + "}"
+ }
+
+ return body.replace(new RegExp("function " + name + "\\(\\) {"),
+ "function " + name + "_" + x + "() {\n" + header);
+}
+
+function test(expected, func, depth) {
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+
+ var orig = func.toString();
+ var name = func.name;
+ for (var depth = 1; depth < 4; depth++) {
+ var body = nest(orig, name, depth);
+ func = eval("(" + body + ")");
+
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+ }
+}
+
+function foo() {
+ var result;
+ {
+ let sum = 0;
+ for (var i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ sum += i;
+ }
+ result = sum;
+ }
+ return result;
+}
+
+test(45, foo);
+
+function bar() {
+ let sum = 0;
+ for (var i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ sum += i;
+ }
+ return sum;
+}
+
+test(45, bar);
+
+function bon() {
+ {
+ let sum = 0;
+ for (var i = 0; i < 10; i++) {
+ if (i == 5) %OptimizeOsr();
+ sum += i;
+ }
+ return sum;
+ }
+}
+
+test(45, bon);
+
+function row() {
+ var i = 0;
+ {
+ let sum = 0;
+ while (true) {
+ if (i == 8) return sum;
+ %OptimizeOsr();
+ sum = i;
+ i = i + 1 | 0;
+ }
+ }
+ return 11;
+}
+
+test(7, row);
+
+function nub() {
+ let i = 0;
+ while (i < 2) {
+ %OptimizeOsr();
+ i++;
+ }
+ return i;
+}
+
+test(2, nub);
+
+function kub() {
+ var result = 0;
+ let i = 0;
+ while (i < 2) {
+ let x = i;
+ %OptimizeOsr();
+ i++;
+ result = x;
+ }
+ return result;
+}
+
+test(1, kub);
diff --git a/deps/v8/test/mjsunit/compiler/osr-follow.js b/deps/v8/test/mjsunit/compiler/osr-follow.js
new file mode 100644
index 0000000000..b6a2e8e4be
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-follow.js
@@ -0,0 +1,61 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --turbo-osr
+
+function foo(a) {
+ var sum = 0;
+ var inc = a ? 100 : 200;
+ for (var i = 0; i < 100000; i++) {
+ sum += inc;
+ }
+ return sum + inc;
+}
+
+function bar(a) {
+ var sum = 0;
+ var inc = a ? 100 : 200;
+ var x = a ? 5 : 6;
+ var y = a ? 7 : 8;
+ for (var i = 0; i < 100000; i++) {
+ sum += inc;
+ }
+ return sum ? x : y;
+}
+
+function baz(a) {
+ var limit = a ? 100001 : 100002;
+ var r = 1;
+ var x = a ? 1 : 2;
+ var y = a ? 3 : 4;
+ for (var i = 0; i < limit; i++) {
+ r = r * -1;
+ }
+ return r > 0 ? x == y : x != y;
+}
+
+function qux(a) {
+ var limit = a ? 100001 : 100002;
+ var r = 1;
+ var x = a ? 1 : 2;
+ var y = a ? 3 : 4;
+ for (var i = 0; i < limit; i++) {
+ r = r * -1;
+ }
+ var w = r > 0 ? x : y;
+ var z = r > 0 ? y : x;
+ return w === z;
+}
+
+function test(func, tv, fv) {
+ assertEquals(tv, func(true));
+ assertEquals(fv, func(false));
+ assertEquals(tv, func(true));
+ assertEquals(fv, func(false));
+}
+
+test(foo, 10000100, 20000200);
+test(bar, 5, 6);
+test(baz, true, false);
+test(qux, false, false);
diff --git a/deps/v8/test/mjsunit/compiler/osr-for-let.js b/deps/v8/test/mjsunit/compiler/osr-for-let.js
new file mode 100644
index 0000000000..4b2fa3e532
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-for-let.js
@@ -0,0 +1,82 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+"use strict";
+
+function test(expected, func) {
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+}
+
+function bar() {
+ var result;
+ {
+ let sum = 0;
+ for (let i = 0; i < 90; i++) {
+ sum += i;
+ if (i == 45) %OptimizeOsr();
+ }
+ result = sum;
+ }
+ return result;
+}
+
+test(4005, bar);
+
+function baz() {
+ let sum = 0;
+ for (let i = 0; i < 2; i++) {
+ sum = 2;
+ %OptimizeOsr();
+ }
+ return sum;
+}
+
+test(2, baz);
+
+function qux() {
+ var result = 0;
+ for (let i = 0; i < 2; i++) {
+ result = i;
+ %OptimizeOsr();
+ }
+ return result;
+}
+
+test(1, qux);
+
+function nux() {
+ var result = 0;
+ for (let i = 0; i < 2; i++) {
+ {
+ let sum = i;
+ %OptimizeOsr();
+ result = sum;
+ }
+ }
+ return result;
+}
+
+test(1, nux);
+
+function blo() {
+ var result;
+ {
+ let sum = 0;
+ for (let i = 0; i < 90; i++) {
+ sum += i;
+ if (i == 45) %OptimizeOsr();
+ }
+ result = ret;
+ function ret() {
+ return sum;
+ }
+ }
+ return result;
+}
+
+test(4005, blo());
diff --git a/deps/v8/test/mjsunit/compiler/osr-forin.js b/deps/v8/test/mjsunit/compiler/osr-forin.js
new file mode 100644
index 0000000000..8d1678224c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-forin.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --turbo-osr
+
+function f(a) {
+ var sum = 0;
+ for (var j in a) {
+ var i = a[j];
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ return sum;
+}
+
+var a = new Array(10000);
+for (var i = 0; i < 10000; i++) {
+ a[i] = (i * 999) % 77;
+}
+
+for (var i = 0; i < 3; i++) {
+ assertEquals(480270, f(a));
+}
diff --git a/deps/v8/test/mjsunit/compiler/osr-forof.js b/deps/v8/test/mjsunit/compiler/osr-forof.js
new file mode 100644
index 0000000000..36bff09c58
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-forof.js
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --turbo-osr
+
+function f(a) {
+ var sum = 0;
+ for (var i of a) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ return sum;
+}
+
+var a = new Array(10000);
+for (var i = 0; i < 10000; i++) {
+ a[i] = (i * 999) % 77;
+}
+
+for (var i = 0; i < 3; i++) {
+ assertEquals(480270, f(wrap(a)));
+}
+
+function wrap(array) {
+ var iterable = {};
+ var i = 0;
+ function next() {
+ return { done: i >= array.length, value: array[i++] };
+ };
+ iterable[Symbol.iterator] = function() { return { next:next }; };
+ return iterable;
+}
diff --git a/deps/v8/test/mjsunit/compiler/osr-function-id.js b/deps/v8/test/mjsunit/compiler/osr-function-id.js
new file mode 100644
index 0000000000..c506ae8282
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-function-id.js
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --turbo-osr
+
+function id(f) { return f; }
+
+function foo() {
+ var sum = 0;
+ var r = id(foo);
+ for (var i = 0; i < 100000; i++) {
+ sum += i;
+ }
+ return foo == r;
+}
+
+assertEquals(true, foo());
+assertEquals(true, foo());
+assertEquals(true, foo());
+
+
+function bar() {
+ var sum = 0;
+ for (var i = 0; i < 90000; i++) {
+ sum += i;
+ }
+ return id(bar,sum);
+}
+
+assertEquals(bar, bar());
+assertEquals(bar, bar());
+assertEquals(bar, bar());
diff --git a/deps/v8/test/mjsunit/compiler/osr-function-id2.js b/deps/v8/test/mjsunit/compiler/osr-function-id2.js
new file mode 100644
index 0000000000..561c62e1bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-function-id2.js
@@ -0,0 +1,28 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --turbo-osr
+
+function id(f) { return f; }
+
+var x = (function foo() {
+ var sum = 0;
+ var r = id(foo);
+ for (var i = 0; i < 100000; i++) {
+ sum += i;
+ }
+ return foo == r;
+})();
+
+assertEquals(true, x);
+
+var x = (function bar() {
+ var sum = 0;
+ for (var i = 0; i < 90000; i++) {
+ sum += i;
+ }
+ return bar;
+})();
+
+assertEquals("function", typeof x);
diff --git a/deps/v8/test/mjsunit/compiler/osr-function.js b/deps/v8/test/mjsunit/compiler/osr-function.js
new file mode 100644
index 0000000000..06d137b62c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-function.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --turbo-osr
+
+function foo() {
+ var sum = 0;
+ for (var i = 0; i < 100000; i++) {
+ sum += i;
+ }
+ return function() { return sum; }
+}
+
+assertEquals(4999950000, foo()());
+assertEquals(4999950000, foo()());
+assertEquals(4999950000, foo()());
+
+function bar() {
+ var sum = 0;
+ var ret = 0;
+ for (var i = 0; i < 90000; i++) {
+ sum += i;
+ if (i == 0) ret = function() { return sum; }
+ }
+ return ret;
+}
+
+assertEquals(4049955000, bar()());
+assertEquals(4049955000, bar()());
+assertEquals(4049955000, bar()());
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual1.js b/deps/v8/test/mjsunit/compiler/osr-manual1.js
new file mode 100644
index 0000000000..29a4948a65
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-manual1.js
@@ -0,0 +1,35 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+var counter = 111;
+
+function gen(w) { // defeat compiler cache.
+ var num = counter++;
+ var Z = [ "", "", "", ];
+ Z[w] = "%OptimizeOsr()";
+ var src =
+ "function f" + num + "(a,b,c) {" +
+ " var x = 0;" +
+ " var y = 0;" +
+ " var z = 0;" +
+ " while (a > 0) { " + Z[0] + "; x += 19; a--; }" +
+ " while (b > 0) { " + Z[1] + "; y += 23; b--; }" +
+ " while (c > 0) { " + Z[2] + "; z += 29; c--; }" +
+ " return x + y + z;" +
+ "} f" + num;
+ return eval(src);
+}
+
+function check(x,a,b,c) {
+ for (var i = 0; i < 3; i++) {
+ var f = gen(i);
+ assertEquals(x, f(a, b, c));
+ }
+}
+
+check(213, 3,3,3);
+check(365, 4,5,6);
+check(6948, 99,98,97);
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual2.js b/deps/v8/test/mjsunit/compiler/osr-manual2.js
new file mode 100644
index 0000000000..8aa5d69db3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-manual2.js
@@ -0,0 +1,35 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+var counter = 188;
+
+function gen(w) { // defeat compiler cache.
+ var num = counter++;
+ var Z = [ "", "", "", ];
+ Z[w] = "%OptimizeOsr()";
+ var src =
+ "function f" + num + "(a,b,c) {" +
+ " var x = 0;" +
+ " var y = 0;" +
+ " var z = 0;" +
+ " while (a > 0) { " + Z[0] + "; x += 19; a--; var j=2; while(j--); }" +
+ " while (b > 0) { " + Z[1] + "; y += 23; b--; var j=2; while(j--); }" +
+ " while (c > 0) { " + Z[2] + "; z += 29; c--; var j=2; while(j--); }" +
+ " return x + y + z;" +
+ "} f" + num;
+ return eval(src);
+}
+
+function check(x,a,b,c) {
+ for (var i = 0; i < 3; i++) {
+ var f = gen(i);
+ assertEquals(x, f(a, b, c));
+ }
+}
+
+check(213, 3,3,3);
+check(365, 4,5,6);
+check(6948, 99,98,97);
diff --git a/deps/v8/test/mjsunit/compiler/osr-maze1.js b/deps/v8/test/mjsunit/compiler/osr-maze1.js
new file mode 100644
index 0000000000..6e192c17b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-maze1.js
@@ -0,0 +1,51 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-deoptimization
+
+function bar(goal) {
+ var count = 0;
+ var sum = 11;
+ var i = 35;
+ while (i-- > 33) {
+ if (count++ == goal) %OptimizeOsr();
+ sum = sum + i;
+ }
+ while (i-- > 31) {
+ if (count++ == goal) %OptimizeOsr();
+ j = 9;
+ while (j-- > 7) {
+ if (count++ == goal) %OptimizeOsr();
+ sum = sum + j * 3;
+ }
+ while (j-- > 5) {
+ if (count++ == goal) %OptimizeOsr();
+ sum = sum + j * 5;
+ }
+ }
+ while (i-- > 29) {
+ if (count++ == goal) %OptimizeOsr();
+ while (j-- > 3) {
+ var k = 10;
+ if (count++ == goal) %OptimizeOsr();
+ while (k-- > 8) {
+ if (count++ == goal) %OptimizeOsr();
+ sum = sum + k * 11;
+ }
+ }
+ while (j-- > 1) {
+ if (count++ == goal) %OptimizeOsr();
+ while (k-- > 6) {
+ if (count++ == goal) %OptimizeOsr();
+ sum = sum + j * 13;
+ }
+ }
+ }
+ return sum;
+}
+
+for (var i = 0; i < 13; i++) {
+ %DeoptimizeFunction(bar);
+ assertEquals(348, bar(i));
+}
diff --git a/deps/v8/test/mjsunit/compiler/osr-maze2.js b/deps/v8/test/mjsunit/compiler/osr-maze2.js
new file mode 100644
index 0000000000..96838a4c34
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-maze2.js
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-deoptimization
+
+function bar() {
+ var sum = 11;
+ var i = 35;
+ while (i-- > 31) {
+ LOOP1();
+ j = 9;
+ while (j-- > 7) {
+ LOOP2();
+ sum = sum + j * 5;
+ var k = 7;
+ while (k-- > 5) {
+ LOOP3();
+ sum = sum + j * 5;
+ }
+ }
+ }
+ while (i-- > 29) {
+ LOOP4();
+ while (j-- > 3) {
+ LOOP5();
+ var k = 10;
+ while (k-- > 8) {
+ LOOP6();
+ sum = sum + k * 11;
+ }
+ }
+ while (j-- > 1) {
+ LOOP7();
+ var k = 8;
+ while (k-- > 6) {
+ LOOP8();
+ var m = 9;
+ while (m-- > 6) {
+ LOOP9();
+ sum = sum + k * 13;
+ }
+ }
+ }
+ }
+ return sum;
+}
+
+function gen(i) {
+ var body = bar.toString();
+ body = body.replace(new RegExp("bar"), "bar" + i);
+ for (var j = 1; j < 10; j++) {
+ var r = new RegExp("LOOP" + j + "\\(\\);");
+ if (i == j) body = body.replace(r, "%OptimizeOsr();");
+ else body = body.replace(r, "");
+ }
+ return eval("(" + body + ")");
+}
+
+for (var i = 1; i < 10; i++) {
+ var f = gen(i);
+ assertEquals(1979, f());
+}
diff --git a/deps/v8/test/mjsunit/compiler/osr-multiple.js b/deps/v8/test/mjsunit/compiler/osr-multiple.js
new file mode 100644
index 0000000000..c318645d32
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-multiple.js
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --turbo-osr
+
+function f1(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ return x + y + z;
+}
+
+function f2(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ return x + y + z;
+}
+
+
+function f3(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ return x + y + z;
+}
+
+function check(f,a,b,c) {
+ assertEquals(a * 19 + b * 23 + c * 29, f(a,b,c));
+}
+
+check(f1, 50000, 5, 6);
+check(f2, 4, 50000, 6);
+check(f3, 11, 12, 50000);
diff --git a/deps/v8/test/mjsunit/compiler/osr-multiple2.js b/deps/v8/test/mjsunit/compiler/osr-multiple2.js
new file mode 100644
index 0000000000..9a81bfb658
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-multiple2.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr
+// TODO(titzer): enable --turbo-osr when nested OSR works.
+
+function f1(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ for (var i = 0; i < 2; i++) {
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ }
+ return x + y + z;
+}
+
+function f2(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ for (var i = 0; i < 2; i++) {
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ }
+ return x + y + z;
+}
+
+
+function f3(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ for (var i = 0; i < 2; i++) {
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ }
+ return x + y + z;
+}
+
+function check(f,a,b,c) {
+ assertEquals(a * 19 + b * 23 + c * 29, f(a,b,c));
+}
+
+check(f1, 50000, 5, 6);
+check(f2, 4, 50000, 6);
+check(f3, 11, 12, 50000);
diff --git a/deps/v8/test/mjsunit/compiler/osr-multiple3.js b/deps/v8/test/mjsunit/compiler/osr-multiple3.js
new file mode 100644
index 0000000000..0fb1ac73a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-multiple3.js
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr
+// TODO(titzer): enable --turbo-osr when nested OSR works.
+
+function f1(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ for (var i = 0; i < 2; i++) {
+ for (var j = 0; j < 2; j++) {
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ }
+ }
+ return x + y + z;
+}
+
+function f2(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ for (var i = 0; i < 2; i++) {
+ for (var j = 0; j < 2; j++) {
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ }
+ }
+ return x + y + z;
+}
+
+
+function f3(a,b,c) {
+ var x = 0;
+ var y = 0;
+ var z = 0;
+ for (var i = 0; i < 2; i++) {
+ for (var j = 0; j < 2; j++) {
+ while (a > 0) { x += 19; a--; }
+ while (b > 0) { y += 23; b--; }
+ while (c > 0) { z += 29; c--; }
+ }
+ }
+ return x + y + z;
+}
+
+function check(f,a,b,c) {
+ assertEquals(a * 19 + b * 23 + c * 29, f(a,b,c));
+}
+
+check(f1, 50000, 5, 6);
+check(f2, 4, 50000, 6);
+check(f3, 11, 12, 50000);
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested2.js b/deps/v8/test/mjsunit/compiler/osr-nested2.js
new file mode 100644
index 0000000000..41bd9b247b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-nested2.js
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function f() {
+ var sum = 0;
+ for (var i = 5; i < 6; i++) {
+ for (var j = 0; j < 1000; j++) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ if (i == 21) %OptimizeOsr();
+ }
+ }
+ return sum;
+}
+
+
+assertEquals(15000, f());
+assertEquals(15000, f());
+assertEquals(15000, f());
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested2b.js b/deps/v8/test/mjsunit/compiler/osr-nested2b.js
new file mode 100644
index 0000000000..e64c10ccb4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-nested2b.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function f() {
+ var sum = 0;
+ for (var i = 5; i < 6; i++) {
+ for (var j = 0; j < 1000; j++) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ if (i == 25) %OptimizeOsr();
+ }
+ if (true) break;
+ }
+ return sum;
+}
+
+
+assertEquals(15000, f());
+assertEquals(15000, f());
+assertEquals(15000, f());
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested3.js b/deps/v8/test/mjsunit/compiler/osr-nested3.js
new file mode 100644
index 0000000000..f5d09ba166
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-nested3.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function f() {
+ var sum = 0;
+ for (var m = 99; m < 100; m++) {
+ for (var i = 5; i < 6; i++) {
+ for (var j = 0; j < 1000; j++) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ if (i == 19) %OptimizeOsr();
+ }
+ }
+ }
+ return sum;
+}
+
+
+assertEquals(15000, f());
+assertEquals(15000, f());
+assertEquals(15000, f());
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested3b.js b/deps/v8/test/mjsunit/compiler/osr-nested3b.js
new file mode 100644
index 0000000000..32ac2a7058
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-nested3b.js
@@ -0,0 +1,28 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function f() {
+ var sum = 0;
+ for (var m = 99; m < 100; m++) {
+ for (var i = 5; i < 6; i++) {
+ for (var j = 0; j < 1000; j++) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ if (i == 25) %OptimizeOsr();
+ }
+ if (true) break;
+ }
+ if (true) break;
+ }
+ return sum;
+}
+
+
+assertEquals(15000, f());
+assertEquals(15000, f());
+assertEquals(15000, f());
diff --git a/deps/v8/test/mjsunit/compiler/osr-regex-id.js b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
new file mode 100644
index 0000000000..7831b14840
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function id(f) { return f; }
+
+function foo(a) {
+ var r = /\0/;
+ for (var i = 0; i < 10; i++) {
+ if (a) %OptimizeOsr();
+ }
+ return r;
+}
+
+function bar(a) {
+ for (var i = 0; i < 10; i++) {
+ if (a) %OptimizeOsr();
+ var r = /\0/;
+ }
+ return r;
+}
+
+function baz(a) {
+ for (var i = 0; i < 10; i++) {
+ if (a) %OptimizeOsr();
+ }
+ return /\0/;
+}
+
+function qux(a) {
+ for (var i = 0; i < 10; i++) {
+ if (i > 5 && a) {
+ %OptimizeOsr();
+ } else {
+ var r = /\0/;
+ }
+ }
+ return r;
+}
+
+function test(f) {
+ // Test the reference equality of regex's created in OSR'd function.
+ var x = f(false);
+ assertEquals(x, f(true));
+ assertEquals(x, f(true));
+ assertEquals(x, f(true));
+}
+
+test(foo);
+test(bar);
+test(baz);
+test(qux);
diff --git a/deps/v8/test/mjsunit/compiler/osr-sar.js b/deps/v8/test/mjsunit/compiler/osr-sar.js
index fd68b98a45..cc04adca8a 100644
--- a/deps/v8/test/mjsunit/compiler/osr-sar.js
+++ b/deps/v8/test/mjsunit/compiler/osr-sar.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
function test() {
// Loop to force OSR.
diff --git a/deps/v8/test/mjsunit/compiler/osr-simple.js b/deps/v8/test/mjsunit/compiler/osr-simple.js
index 8ec1b2b936..ddbc5f8867 100644
--- a/deps/v8/test/mjsunit/compiler/osr-simple.js
+++ b/deps/v8/test/mjsunit/compiler/osr-simple.js
@@ -1,44 +1,22 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-// Flags: --use-osr
+// Flags: --allow-natives-syntax --use-osr
function f() {
var sum = 0;
- for (var i = 0; i < 1000000; i++) {
+ for (var i = 0; i < 1000; i++) {
var x = i + 2;
var y = x + 5;
var z = y + 3;
sum += z;
+ if (i == 11) %OptimizeOsr();
}
return sum;
}
for (var i = 0; i < 2; i++) {
- assertEquals(500009500000, f());
+ assertEquals(509500, f());
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-top1.js b/deps/v8/test/mjsunit/compiler/osr-top1.js
new file mode 100644
index 0000000000..742b71d86e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-top1.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --allow-natives-syntax
+
+var sum = 0;
+for (var i = 0; i < 10000; i++) {
+ if (i == 100) %OptimizeOsr();
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+}
+
+assertEquals(50095000, sum);
diff --git a/deps/v8/test/mjsunit/compiler/osr-top2.js b/deps/v8/test/mjsunit/compiler/osr-top2.js
new file mode 100644
index 0000000000..a15aa15d04
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-top2.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --allow-natives-syntax
+
+for (var j = 0; j < 3; j++) {
+ var sum = 0;
+ for (var i = 0; i < 1000; i++) {
+ if (i == 100) %OptimizeOsr();
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ assertEquals(509500, sum);
+}
+
+assertEquals(509500, sum);
diff --git a/deps/v8/test/mjsunit/compiler/osr-top3.js b/deps/v8/test/mjsunit/compiler/osr-top3.js
new file mode 100644
index 0000000000..4c4a364be0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-top3.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --allow-natives-syntax
+
+for (var k = 0; k < 2; k++) {
+ for (var j = 0; j < 3; j++) {
+ var sum = 0;
+ for (var i = 0; i < 1000; i++) {
+ if (i == 100) %OptimizeOsr();
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ assertEquals(509500, sum);
+ }
+ assertEquals(509500, sum);
+}
+
+assertEquals(509500, sum);
diff --git a/deps/v8/test/mjsunit/compiler/osr-warm.js b/deps/v8/test/mjsunit/compiler/osr-warm.js
index 73e1fd5cd2..7c30c07f20 100644
--- a/deps/v8/test/mjsunit/compiler/osr-warm.js
+++ b/deps/v8/test/mjsunit/compiler/osr-warm.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr
+// Flags: --use-osr --turbo-osr
function f1(x) {
while (x > 0) {
diff --git a/deps/v8/test/mjsunit/compiler/osr-while-let.js b/deps/v8/test/mjsunit/compiler/osr-while-let.js
new file mode 100644
index 0000000000..c19cf6cb24
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-while-let.js
@@ -0,0 +1,58 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+"use strict";
+
+function test(expected, func) {
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+ assertEquals(expected, func());
+}
+
+function foo() {
+ var result = 0;
+ {
+ let x = 0;
+ var temp_x = x;
+ var first = 1;
+ outer: while (true) {
+ let x = temp_x;
+ if (first == 1) first = 0;
+ else x = x + 1 | 0;
+ var flag = 1;
+ for (; flag == 1; (flag = 0, temp_x = x)) {
+ if (x < 2) {
+ result = x; %OptimizeOsr();
+ } else {
+ break outer;
+ }
+ }
+ if (flag == 1) break;
+ }
+ }
+ return result;
+}
+
+test(1, foo);
+
+
+function smo() {
+ var result = 0;
+ {
+ let x = 11;
+ outer: while (true) {
+ let y = x;
+ for (var i = 0; i < 5; i++) {
+ %OptimizeOsr();
+ if (i) break outer;
+ else result = y;
+ }
+ }
+ }
+ return result;
+}
+
+test(11, smo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-3812.js b/deps/v8/test/mjsunit/compiler/regress-3812.js
new file mode 100644
index 0000000000..cfc8febc9a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-3812.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+var foo = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ function foo(i) {
+ var x = i ? (i&1) : true;
+ if (x) return x;
+ return false;
+ }
+ return {foo:foo};
+})(stdlib, foreign, buffer).foo;
+
+assertEquals(1, foo(1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-416359.js b/deps/v8/test/mjsunit/compiler/regress-416359.js
new file mode 100644
index 0000000000..18cdc5e728
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-416359.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict"
+function f() {
+ for (x in {a:0});
+}
+
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-445907.js b/deps/v8/test/mjsunit/compiler/regress-445907.js
new file mode 100644
index 0000000000..c820753eec
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-445907.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-deoptimization
+
+v = [];
+v.length = (1 << 30);
+
+function f() {
+ v++;
+}
+
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-446647.js b/deps/v8/test/mjsunit/compiler/regress-446647.js
new file mode 100644
index 0000000000..77757abd66
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-446647.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt --turbo-filter=* --turbo-deoptimization --allow-natives-syntax
+
+function f(a,b) {
+ a%b
+};
+
+f({ toString : function() { %DeoptimizeFunction(f); }});
diff --git a/deps/v8/test/mjsunit/compiler/regress-447567.js b/deps/v8/test/mjsunit/compiler/regress-447567.js
new file mode 100644
index 0000000000..b6dc653709
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-447567.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-deoptimization
+
+assertThrows(function() {
+ [0].every(function(){ Object.seal((new Int8Array())); });
+})
+
+assertThrows(function() {
+ "use strict";
+ const v = 42;
+ v += 1;
+});
diff --git a/deps/v8/test/mjsunit/compiler/regress-451012.js b/deps/v8/test/mjsunit/compiler/regress-451012.js
new file mode 100644
index 0000000000..bffc8bc5bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-451012.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+function f() {
+ for (let v; v; ) {
+ let x;
+ }
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-452427.js b/deps/v8/test/mjsunit/compiler/regress-452427.js
new file mode 100644
index 0000000000..f798b9cc79
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-452427.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+var rol = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ function rol() {
+ y = "a" > false;
+ return y + (1 - y);
+ }
+ return { rol: rol };
+})(stdlib, foreign, heap).rol;
+
+assertEquals(1, rol());
diff --git a/deps/v8/test/mjsunit/compiler/regress-to-number-binop-deopt.js b/deps/v8/test/mjsunit/compiler/regress-to-number-binop-deopt.js
new file mode 100644
index 0000000000..f6b77d9082
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-to-number-binop-deopt.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function deopt(f) {
+ return { valueOf : function() { %DeoptimizeFunction(f); return 1.1; } };
+}
+
+function or_zero(o) {
+ return o|0;
+}
+
+function multiply_one(o) {
+ return +o;
+}
+
+function multiply_one_symbol() {
+ return +Symbol();
+}
+
+assertThrows(multiply_one_symbol, TypeError);
+assertEquals(1, or_zero(deopt(or_zero)));
+assertEquals(1.1, multiply_one(deopt(multiply_one)));
diff --git a/deps/v8/test/mjsunit/count-based-osr.js b/deps/v8/test/mjsunit/count-based-osr.js
index f06013083f..2df0b644f6 100644
--- a/deps/v8/test/mjsunit/count-based-osr.js
+++ b/deps/v8/test/mjsunit/count-based-osr.js
@@ -31,9 +31,8 @@
function osr_this() {
var a = 1;
- // Trigger OSR. First check if optimization is disabled.
- if (%GetOptimizationStatus(osr_this) == 4) return 1;
- while (%GetOptimizationCount(osr_this) == 0) {}
+ while (%GetOptimizationCount(osr_this) == 2) ;
return a;
}
assertEquals(1, osr_this());
+assertEquals(1, osr_this());
diff --git a/deps/v8/test/mjsunit/d8-os.js b/deps/v8/test/mjsunit/d8-os.js
index f6b98396e5..29d31032e7 100644
--- a/deps/v8/test/mjsunit/d8-os.js
+++ b/deps/v8/test/mjsunit/d8-os.js
@@ -135,7 +135,7 @@ if (this.os && os.system) {
assertThrows("os.system('sleep', ['2000'], -1, 20);", "sleep 2");
// Check that -1 means no timeout.
- os.system('sleep', ['0.1'], -1, -1);
+ os.system('sleep', ['1'], -1, -1);
}
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 07f0e3c459..af1eb454d6 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -96,9 +96,9 @@ if (extension_gc_script) {
}
// Test a normal script.
-var mjsunit_js_script = Debug.findScript(/mjsunit.js/);
-assertTrue(/mjsunit.js/.test(mjsunit_js_script.name));
-assertEquals(Debug.ScriptType.Normal, mjsunit_js_script.type);
+var debug_script = Debug.findScript(/debug-script.js/);
+assertTrue(/debug-script.js/.test(debug_script.name));
+assertEquals(Debug.ScriptType.Normal, debug_script.type);
// Check a nonexistent script.
var dummy_script = Debug.findScript('dummy.js');
diff --git a/deps/v8/test/mjsunit/es6/array-tostring.js b/deps/v8/test/mjsunit/es6/array-tostring.js
new file mode 100644
index 0000000000..8a9198ca16
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-tostring.js
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tostring
+
+var global = this;
+
+var funs = {
+ Object: [ Object ],
+ Function: [ Function ],
+ String: [ String ],
+ Boolean: [ Boolean ],
+ Number: [ Number ],
+ Date: [ Date ],
+ RegExp: [ RegExp ],
+ Error: [ Error, TypeError, RangeError, SyntaxError, ReferenceError,
+ EvalError, URIError ]
+}
+for (f in funs) {
+ for (i in funs[f]) {
+ assertEquals("[object " + f + "]",
+ Array.prototype.toString.call(new funs[f][i]),
+ funs[f][i]);
+ assertEquals("[object Function]",
+ Array.prototype.toString.call(funs[f][i]),
+ funs[f][i]);
+ }
+}
+
+
+function testToStringTag(className) {
+ // Using builtin toStringTags
+ var obj = {};
+ obj[Symbol.toStringTag] = className;
+ assertEquals("[object " + className + "]",
+ Array.prototype.toString.call(obj));
+
+ // Getter throws
+ obj = {};
+ Object.defineProperty(obj, Symbol.toStringTag, {
+ get: function() { throw className; }
+ });
+ assertThrows(function() {
+ Array.prototype.toString.call(obj);
+ }, className);
+
+ // Getter does not throw
+ obj = {};
+ Object.defineProperty(obj, Symbol.toStringTag, {
+ get: function() { return className; }
+ });
+ assertEquals("[object " + className + "]",
+ Array.prototype.toString.call(obj));
+
+ // Custom, non-builtin toStringTags
+ obj = {};
+ obj[Symbol.toStringTag] = "X" + className;
+ assertEquals("[object X" + className + "]",
+ Array.prototype.toString.call(obj));
+
+ // With getter
+ obj = {};
+ Object.defineProperty(obj, Symbol.toStringTag, {
+ get: function() { return "X" + className; }
+ });
+ assertEquals("[object X" + className + "]",
+ Array.prototype.toString.call(obj));
+
+ // Undefined toStringTag should return [object className]
+ var obj = className === "Arguments" ?
+ (function() { return arguments; })() : new global[className];
+ obj[Symbol.toStringTag] = undefined;
+ assertEquals("[object " + className + "]",
+ Array.prototype.toString.call(obj));
+
+ // With getter
+ var obj = className === "Arguments" ?
+ (function() { return arguments; })() : new global[className];
+ Object.defineProperty(obj, Symbol.toStringTag, {
+ get: function() { return undefined; }
+ });
+ assertEquals("[object " + className + "]",
+ Array.prototype.toString.call(obj));
+}
+
+
+[
+ "Arguments",
+ "Boolean",
+ "Date",
+ "Error",
+ "Function",
+ "Number",
+ "RegExp",
+ "String"
+].forEach(testToStringTag);
+
+
+function testToStringTagNonString(value) {
+ var obj = {};
+ obj[Symbol.toStringTag] = value;
+ assertEquals("[object Object]", Array.prototype.toString.call(obj));
+
+ // With getter
+ obj = {};
+ Object.defineProperty(obj, Symbol.toStringTag, {
+ get: function() { return value; }
+ });
+ assertEquals("[object Object]", Array.prototype.toString.call(obj));
+}
+
+
+[
+ null,
+ function() {},
+ [],
+ {},
+ /regexp/,
+ 42,
+ Symbol("sym"),
+ new Date(),
+ (function() { return arguments; })(),
+ true,
+ new Error("oops"),
+ new String("str")
+].forEach(testToStringTagNonString);
+
+
+function testArrayToStringPropertyDesc() {
+ var desc = Object.getOwnPropertyDescriptor(Object.prototype, "toString");
+ assertTrue(desc.writable);
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+}
+testArrayToStringPropertyDesc();
+
+
+function testArrayToStringOwnNonStringValue() {
+ var obj = Object.defineProperty({}, Symbol.toStringTag, { value: 1 });
+ assertEquals("[object Object]", ([]).toString.call(obj));
+}
+testArrayToStringOwnNonStringValue();
+
+
+function testArrayToStringBasic() {
+ assertEquals("1,2,3", [1,2,3].toString());
+ assertEquals(",,3", [,,3].toString());
+}
+testArrayToStringBasic();
+
+
+function testArrayToStringObjectWithCallableJoin() {
+ var obj = { join: function() { return "CallableJoin"; } };
+ assertEquals("CallableJoin", Array.prototype.toString.call(obj));
+}
+testArrayToStringObjectWithCallableJoin();
diff --git a/deps/v8/test/mjsunit/es6/iteration-syntax.js b/deps/v8/test/mjsunit/es6/iteration-syntax.js
index 356a97898a..4be94c5db4 100644
--- a/deps/v8/test/mjsunit/es6/iteration-syntax.js
+++ b/deps/v8/test/mjsunit/es6/iteration-syntax.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping --use-strict
+// Flags: --harmony-scoping
// Test for-of syntax.
@@ -35,28 +35,38 @@ function f() { for (x of y) { } }
function f() { for (var x of y) { } }
function f() { for (let x of y) { } }
-assertThrows("function f() { for (x of) { } }", SyntaxError);
-assertThrows("function f() { for (x of y z) { } }", SyntaxError);
-assertThrows("function f() { for (x of y;) { } }", SyntaxError);
+function StrictSyntaxError(s) {
+ try {
+ eval(s);
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ return;
+ }
+ throw "did not throw";
+}
-assertThrows("function f() { for (var x of) { } }", SyntaxError);
-assertThrows("function f() { for (var x of y z) { } }", SyntaxError);
-assertThrows("function f() { for (var x of y;) { } }", SyntaxError);
+StrictSyntaxError("function f() { for (x of) { } }");
+StrictSyntaxError("function f() { for (x of y z) { } }");
+StrictSyntaxError("function f() { for (x of y;) { } }");
-assertThrows("function f() { for (let x of) { } }", SyntaxError);
-assertThrows("function f() { for (let x of y z) { } }", SyntaxError);
-assertThrows("function f() { for (let x of y;) { } }", SyntaxError);
+StrictSyntaxError("function f() { for (var x of) { } }");
+StrictSyntaxError("function f() { for (var x of y z) { } }");
+StrictSyntaxError("function f() { for (var x of y;) { } }");
-assertThrows("function f() { for (of y) { } }", SyntaxError);
-assertThrows("function f() { for (of of) { } }", SyntaxError);
-assertThrows("function f() { for (var of y) { } }", SyntaxError);
-assertThrows("function f() { for (var of of) { } }", SyntaxError);
-assertThrows("function f() { for (let of y) { } }", SyntaxError);
-assertThrows("function f() { for (let of of) { } }", SyntaxError);
+StrictSyntaxError("function f() { for (let x of) { } }");
+StrictSyntaxError("function f() { for (let x of y z) { } }");
+StrictSyntaxError("function f() { for (let x of y;) { } }");
-assertThrows("function f() { for (x = 3 of y) { } }", SyntaxError);
-assertThrows("function f() { for (var x = 3 of y) { } }", SyntaxError);
-assertThrows("function f() { for (let x = 3 of y) { } }", SyntaxError);
+StrictSyntaxError("function f() { for (of y) { } }");
+StrictSyntaxError("function f() { for (of of) { } }");
+StrictSyntaxError("function f() { for (var of y) { } }");
+StrictSyntaxError("function f() { for (var of of) { } }");
+StrictSyntaxError("function f() { for (let of y) { } }");
+StrictSyntaxError("function f() { for (let of of) { } }");
+
+StrictSyntaxError("function f() { for (x = 3 of y) { } }");
+StrictSyntaxError("function f() { for (var x = 3 of y) { } }");
+StrictSyntaxError("function f() { for (let x = 3 of y) { } }");
// Alack, this appears to be valid.
diff --git a/deps/v8/test/mjsunit/es6/object-tostring.js b/deps/v8/test/mjsunit/es6/object-tostring.js
index 26dff14b9d..c73a7686cd 100644
--- a/deps/v8/test/mjsunit/es6/object-tostring.js
+++ b/deps/v8/test/mjsunit/es6/object-tostring.js
@@ -33,7 +33,7 @@ function testToStringTag(className) {
// Using builtin toStringTags
var obj = {};
obj[Symbol.toStringTag] = className;
- assertEquals("[object ~" + className + "]",
+ assertEquals("[object " + className + "]",
Object.prototype.toString.call(obj));
// Getter throws
@@ -50,7 +50,7 @@ function testToStringTag(className) {
Object.defineProperty(obj, Symbol.toStringTag, {
get: function() { return className; }
});
- assertEquals("[object ~" + className + "]",
+ assertEquals("[object " + className + "]",
Object.prototype.toString.call(obj));
// Custom, non-builtin toStringTags
@@ -99,14 +99,14 @@ function testToStringTag(className) {
function testToStringTagNonString(value) {
var obj = {};
obj[Symbol.toStringTag] = value;
- assertEquals("[object ???]", Object.prototype.toString.call(obj));
+ assertEquals("[object Object]", Object.prototype.toString.call(obj));
// With getter
obj = {};
Object.defineProperty(obj, Symbol.toStringTag, {
get: function() { return value; }
});
- assertEquals("[object ???]", Object.prototype.toString.call(obj));
+ assertEquals("[object Object]", Object.prototype.toString.call(obj));
}
[
@@ -131,3 +131,9 @@ function testObjectToStringPropertyDesc() {
assertTrue(desc.configurable);
}
testObjectToStringPropertyDesc();
+
+function testObjectToStringOwnNonStringValue() {
+ var obj = Object.defineProperty({}, Symbol.toStringTag, { value: 1 });
+ assertEquals("[object Object]", ({}).toString.call(obj));
+}
+testObjectToStringOwnNonStringValue();
diff --git a/deps/v8/test/mjsunit/harmony/array-concat.js b/deps/v8/test/mjsunit/harmony/array-concat.js
index 286aefd29a..c1ff92c8c3 100644
--- a/deps/v8/test/mjsunit/harmony/array-concat.js
+++ b/deps/v8/test/mjsunit/harmony/array-concat.js
@@ -245,7 +245,7 @@ function testConcatTypedArray(type, elems, modulo) {
}
(function testConcatSmallTypedArray() {
- var max = [2^8, 2^16, 2^32, false, false];
+ var max = [Math.pow(2, 8), Math.pow(2, 16), Math.pow(2, 32), false, false];
[
Uint8Array,
Uint16Array,
@@ -259,7 +259,7 @@ function testConcatTypedArray(type, elems, modulo) {
(function testConcatLargeTypedArray() {
- var max = [2^8, 2^16, 2^32, false, false];
+ var max = [Math.pow(2, 8), Math.pow(2, 16), Math.pow(2, 32), false, false];
[
Uint8Array,
Uint16Array,
diff --git a/deps/v8/test/mjsunit/harmony/array-from.js b/deps/v8/test/mjsunit/harmony/array-from.js
index e7c9fef7d5..c294786c46 100644
--- a/deps/v8/test/mjsunit/harmony/array-from.js
+++ b/deps/v8/test/mjsunit/harmony/array-from.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-arrays --harmony-generators
+// Flags: --harmony-arrays
(function() {
assertEquals(1, Array.from.length);
@@ -91,21 +91,48 @@ function testArrayFrom(thisArg, constructor) {
return x.toUpperCase();
}), ['T', 'E', 'S', 'T'], constructor);
- this.thisArg = thisArg;
- assertThrows('Array.from.call(thisArg, null)', TypeError);
- assertThrows('Array.from.call(thisArg, undefined)', TypeError);
- assertThrows('Array.from.call(thisArg, [], null)', TypeError);
- assertThrows('Array.from.call(thisArg, [], "noncallable")', TypeError);
+ assertThrows(function() { Array.from.call(thisArg, null); }, TypeError);
+ assertThrows(function() { Array.from.call(thisArg, undefined); }, TypeError);
+ assertThrows(function() { Array.from.call(thisArg, [], null); }, TypeError);
+ assertThrows(function() { Array.from.call(thisArg, [], "noncallable"); },
+ TypeError);
- this.nullIterator = {};
+ var nullIterator = {};
nullIterator[Symbol.iterator] = null;
- assertThrows('Array.from.call(thisArg, nullIterator)', TypeError);
+ assertArrayLikeEquals(Array.from.call(thisArg, nullIterator), [],
+ constructor);
- this.nonObjIterator = {};
+ var nonObjIterator = {};
nonObjIterator[Symbol.iterator] = function() { return "nonObject"; };
- assertThrows('Array.from.call(thisArg, nonObjIterator)', TypeError);
-
- assertThrows('Array.from.call(thisArg, [], null)', TypeError);
+ assertThrows(function() { Array.from.call(thisArg, nonObjIterator); },
+ TypeError);
+
+ assertThrows(function() { Array.from.call(thisArg, [], null); }, TypeError);
+
+ // Ensure iterator is only accessed once, and only invoked once
+ var called = false;
+ var arr = [1, 2, 3];
+ var obj = {};
+
+ // Test order --- only get iterator method once
+ function testIterator() {
+ assertFalse(called, "@@iterator should be called only once");
+ called = true;
+ assertEquals(obj, this);
+ return arr[Symbol.iterator]();
+ }
+ var getCalled = false;
+ Object.defineProperty(obj, Symbol.iterator, {
+ get: function() {
+ assertFalse(getCalled, "@@iterator should be accessed only once");
+ getCalled = true;
+ return testIterator;
+ },
+ set: function() {
+ assertUnreachable("@@iterator should not be set");
+ }
+ });
+ assertArrayLikeEquals(Array.from.call(thisArg, obj), [1, 2, 3], constructor);
}
function Other() {}
@@ -118,6 +145,7 @@ testArrayFrom({}, Array);
testArrayFrom(Object, Object);
testArrayFrom(Other, Other);
testArrayFrom(Math.cos, Array);
-testArrayFrom(boundFn, Array);
+testArrayFrom(Math.cos.bind(Math), Array);
+testArrayFrom(boundFn, boundFn);
})();
diff --git a/deps/v8/test/mjsunit/harmony/array-of.js b/deps/v8/test/mjsunit/harmony/array-of.js
index c0a8ed183e..adf7cb547c 100644
--- a/deps/v8/test/mjsunit/harmony/array-of.js
+++ b/deps/v8/test/mjsunit/harmony/array-of.js
@@ -159,6 +159,26 @@ assertEquals(Array.of.length, 0);
assertThrows(function() { new Array.of() }, TypeError); // not a constructor
// When the this-value passed in is not a constructor, the result is an array.
-[undefined, null, false, "cow"].forEach(function(val) {
- assertEquals(Array.isArray(Array.of(val)), true);
+[
+ undefined,
+ null,
+ false,
+ "cow",
+ NaN,
+ 67,
+ Infinity,
+ -Infinity,
+ Math.cos, // builtin functions with no [[Construct]] slot
+ Math.cos.bind(Math) // bound builtin functions with no [[Construct]] slot
+].forEach(function(val) {
+ assertEquals(Array.isArray(Array.of.call(val, val)), true);
});
+
+
+(function testBoundConstructor() {
+ var boundFn = (function() {}).bind(null);
+ var instance = Array.of.call(boundFn, 1, 2, 3);
+ assertEquals(instance.length, 3);
+ assertEquals(instance instanceof boundFn, true);
+ assertEquals(Array.isArray(instance), false);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/class-computed-property-names-super.js b/deps/v8/test/mjsunit/harmony/class-computed-property-names-super.js
new file mode 100644
index 0000000000..5a5db67d48
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/class-computed-property-names-super.js
@@ -0,0 +1,76 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-computed-property-names --harmony-sloppy
+// Flags: --harmony-classes --allow-natives-syntax
+
+
+function ID(x) {
+ return x;
+}
+
+
+(function TestComputedMethodSuper() {
+ class Base {
+ m() {
+ return ' base m';
+ }
+ }
+ class Derived extends Base {
+ ['a']() { return 'a' + super.m(); }
+ [ID('b')]() { return 'b' + super.m(); }
+ [0]() { return '0' + super.m(); }
+ [ID(1)]() { return '1' + super.m(); }
+ }
+
+ assertSame(Derived.prototype, Derived.prototype.a[%HomeObjectSymbol()]);
+
+ assertEquals('a base m', new Derived().a());
+ assertEquals('b base m', new Derived().b());
+ assertEquals('0 base m', new Derived()[0]());
+ assertEquals('1 base m', new Derived()[1]());
+})();
+
+
+(function TestComputedGetterSuper() {
+ class Base {
+ m() {
+ return ' base m';
+ }
+ }
+ class Derived extends Base {
+ get ['a']() { return 'a' + super.m(); }
+ get [ID('b')]() { return 'b' + super.m(); }
+ get [0]() { return '0' + super.m(); }
+ get [ID(1)]() { return '1' + super.m(); }
+ }
+ assertEquals('a base m', new Derived().a);
+ assertEquals('b base m', new Derived().b);
+ assertEquals('0 base m', new Derived()[0]);
+ assertEquals('1 base m', new Derived()[1]);
+})();
+
+
+(function TestComputedSetterSuper() {
+ var value;
+ class Base {
+ m(name, v) {
+ value = name + ' ' + v;
+ }
+ }
+ class Derived extends Base {
+ set ['a'](v) { super.m('a', v); }
+ set [ID('b')](v) { super.m('b', v); }
+ set [0](v) { super.m('0', v); }
+ set [ID(1)](v) { super.m('1', v); }
+ }
+ new Derived().a = 2;
+ assertEquals('a 2', value);
+ new Derived().b = 3;
+ assertEquals('b 3', value);
+ new Derived()[0] = 4;
+ assertEquals('0 4', value);
+ new Derived()[1] = 5;
+ assertEquals('1 5', value);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/class-property-name-eval-arguments.js b/deps/v8/test/mjsunit/harmony/class-property-name-eval-arguments.js
new file mode 100644
index 0000000000..cc53030920
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/class-property-name-eval-arguments.js
@@ -0,0 +1,79 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-classes --harmony-sloppy
+
+
+(function Method() {
+ class C {
+ eval() {
+ return 1;
+ }
+ arguments() {
+ return 2;
+ }
+ static eval() {
+ return 3;
+ }
+ static arguments() {
+ return 4;
+ }
+ };
+
+ assertEquals(1, new C().eval());
+ assertEquals(2, new C().arguments());
+ assertEquals(3, C.eval());
+ assertEquals(4, C.arguments());
+})();
+
+
+(function Getters() {
+ class C {
+ get eval() {
+ return 1;
+ }
+ get arguments() {
+ return 2;
+ }
+ static get eval() {
+ return 3;
+ }
+ static get arguments() {
+ return 4;
+ }
+ };
+
+ assertEquals(1, new C().eval);
+ assertEquals(2, new C().arguments);
+ assertEquals(3, C.eval);
+ assertEquals(4, C.arguments);
+})();
+
+
+(function Setters() {
+ var x = 0;
+ class C {
+ set eval(v) {
+ x = v;
+ }
+ set arguments(v) {
+ x = v;
+ }
+ static set eval(v) {
+ x = v;
+ }
+ static set arguments(v) {
+ x = v;
+ }
+ };
+
+ new C().eval = 1;
+ assertEquals(1, x);
+ new C().arguments = 2;
+ assertEquals(2, x);
+ C.eval = 3;
+ assertEquals(3, x);
+ C.arguments = 4;
+ assertEquals(4, x);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/classes-experimental.js b/deps/v8/test/mjsunit/harmony/classes-experimental.js
new file mode 100644
index 0000000000..e7ebbda735
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/classes-experimental.js
@@ -0,0 +1,339 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+
+'use strict';
+(function TestArgumentsAccess() {
+ class Base {
+ constructor() {
+ assertEquals(2, arguments.length);
+ assertEquals(1, arguments[0]);
+ assertEquals(2, arguments[1]);
+ }
+ }
+
+ let b = new Base(1,2);
+
+ class Subclass extends Base {
+ constructor() {
+ assertEquals(2, arguments.length);
+ assertEquals(3, arguments[0]);
+ assertEquals(4, arguments[1]);
+ super(1,2);
+ }
+ }
+
+ let s = new Subclass(3,4);
+ assertEquals(0, Subclass.length);
+
+ class Subclass2 extends Base {
+ constructor(x,y) {
+ assertEquals(2, arguments.length);
+ assertEquals(3, arguments[0]);
+ assertEquals(4, arguments[1]);
+ super(1,2);
+ }
+ }
+
+ let s2 = new Subclass2(3,4);
+ assertEquals(2, Subclass2.length);
+}());
+
+(function TestThisAccessRestriction() {
+ class Base {
+ constructor(a, b) {
+ let o = new Object();
+ o.prp = a + b;
+ return o;
+ }
+ }
+
+ class Subclass extends Base {
+ constructor(a, b) {
+ var exn;
+ try {
+ this.prp1 = 3;
+ } catch (e) {
+ exn = e;
+ }
+ assertTrue(exn instanceof ReferenceError);
+ super(a, b);
+ assertSame(a + b, this.prp);
+ assertSame(undefined, this.prp1);
+ assertFalse(this.hasOwnProperty("prp1"));
+ return this;
+ }
+ }
+
+ let b = new Base(1, 2);
+ assertSame(3, b.prp);
+
+
+ let s = new Subclass(2, -1);
+ assertSame(1, s.prp);
+ assertSame(undefined, s.prp1);
+ assertFalse(s.hasOwnProperty("prp1"));
+
+ class Subclass2 extends Base {
+ constructor(x) {
+ super(1,2);
+
+ if (x < 0) return;
+
+ let called = false;
+ function tmp() { called = true; return 3; }
+ var exn = null;
+ try {
+ super(tmp(),4);
+ } catch (e) { exn = e; }
+ assertTrue(exn instanceof ReferenceError);
+ assertTrue(called);
+ }
+ }
+
+ var s2 = new Subclass2(1);
+ assertSame(3, s2.prp);
+
+ var s3 = new Subclass2(-1);
+ assertSame(3, s3.prp);
+
+ assertThrows(function() { Subclass.call(new Object(), 1, 2); }, TypeError);
+ assertThrows(function() { Base.call(new Object(), 1, 2); }, TypeError);
+
+ class BadSubclass extends Base {
+ constructor() {}
+ }
+
+ assertThrows(function() { new BadSubclass(); }, ReferenceError);
+}());
+
+(function TestThisCheckOrdering() {
+ let baseCalled = 0;
+ class Base {
+ constructor() { baseCalled++ }
+ }
+
+ let fCalled = 0;
+ function f() { fCalled++; return 3; }
+
+ class Subclass1 extends Base {
+ constructor() {
+ baseCalled = 0;
+ super();
+ assertEquals(1, baseCalled);
+ let obj = this;
+
+ let exn = null;
+ baseCalled = 0;
+ fCalled = 0;
+ try {
+ super(f());
+ } catch (e) { exn = e; }
+ assertTrue(exn instanceof ReferenceError);
+ assertEquals(1, fCalled);
+ assertEquals(1, baseCalled);
+ assertSame(obj, this);
+
+ exn = null;
+ baseCalled = 0;
+ fCalled = 0;
+ try {
+ super(super(), f());
+ } catch (e) { exn = e; }
+ assertTrue(exn instanceof ReferenceError);
+ assertEquals(0, fCalled);
+ assertEquals(1, baseCalled);
+ assertSame(obj, this);
+
+ exn = null;
+ baseCalled = 0;
+ fCalled = 0;
+ try {
+ super(f(), super());
+ } catch (e) { exn = e; }
+ assertTrue(exn instanceof ReferenceError);
+ assertEquals(1, fCalled);
+ assertEquals(1, baseCalled);
+ assertSame(obj, this);
+ }
+ }
+
+ new Subclass1();
+}());
+
+
+(function TestPrototypeWiring() {
+ class Base {
+ constructor(x) {
+ this.foobar = x;
+ }
+ }
+
+ class Subclass extends Base {
+ constructor(x) {
+ super(x);
+ }
+ }
+
+ let s = new Subclass(1);
+ assertSame(1, s.foobar);
+ assertSame(Subclass.prototype, s.__proto__);
+
+ let s1 = new Subclass(1, 2);
+ assertSame(1, s1.foobar);
+ assertTrue(s1.__proto__ === Subclass.prototype);
+
+ let s2 = new Subclass();
+ assertSame(undefined, s2.foobar);
+ assertSame(Subclass.prototype, s2.__proto__);
+ assertThrows(function() { Subclass(1); }, TypeError);
+ assertThrows(function() { Subclass(1,2,3,4); }, TypeError);
+
+ class Subclass2 extends Subclass {
+ constructor() {
+ super(5, 6, 7);
+ }
+ }
+
+ let ss2 = new Subclass2();
+ assertSame(5, ss2.foobar);
+ assertSame(Subclass2.prototype, ss2.__proto__);
+
+ class Subclass3 extends Base {
+ constructor(x,y) {
+ super(x + y);
+ }
+ }
+
+ let ss3 = new Subclass3(27,42-27);
+ assertSame(42, ss3.foobar);
+ assertSame(Subclass3.prototype, ss3.__proto__);
+}());
+
+(function TestSublclassingBuiltins() {
+ class ExtendedUint8Array extends Uint8Array {
+ constructor() {
+ super(10);
+ this[0] = 255;
+ this[1] = 0xFFA;
+ }
+ }
+
+ var eua = new ExtendedUint8Array();
+ assertEquals(10, eua.length);
+ assertEquals(10, eua.byteLength);
+ assertEquals(0xFF, eua[0]);
+ assertEquals(0xFA, eua[1]);
+ assertSame(ExtendedUint8Array.prototype, eua.__proto__);
+ assertEquals("[object Uint8Array]", Object.prototype.toString.call(eua));
+}());
+
+(function TestSubclassingNull() {
+ let N = null;
+
+ class Foo extends N {
+ constructor(x,y) {
+ assertSame(1, x);
+ assertSame(2, y);
+ return {};
+ }
+ }
+
+ new Foo(1,2);
+}());
+
+(function TestSubclassBinding() {
+ class Base {
+ constructor(x, y) {
+ this.x = x;
+ this.y = y;
+ }
+ }
+
+ let obj = {};
+ class Subclass extends Base {
+ constructor(x,y) {
+ super(x,y);
+ assertTrue(this !== obj);
+ }
+ }
+
+ let f = Subclass.bind(obj);
+ assertThrows(function () { f(1, 2); }, TypeError);
+ let s = new f(1, 2);
+ assertSame(1, s.x);
+ assertSame(2, s.y);
+ assertSame(Subclass.prototype, s.__proto__);
+
+ let s1 = new f(1);
+ assertSame(1, s1.x);
+ assertSame(undefined, s1.y);
+ assertSame(Subclass.prototype, s1.__proto__);
+
+ let g = Subclass.bind(obj, 1);
+ assertThrows(function () { g(8); }, TypeError);
+ let s2 = new g(8);
+ assertSame(1, s2.x);
+ assertSame(8, s2.y);
+ assertSame(Subclass.prototype, s.__proto__);
+}());
+
+
+(function TestDefaultConstructor() {
+ class Base1 { }
+ assertThrows(function() { Base1(); }, TypeError);
+
+ class Subclass1 extends Base1 { }
+
+ assertThrows(function() { Subclass1(); }, TypeError);
+
+ let s1 = new Subclass1();
+ assertSame(s1.__proto__, Subclass1.prototype);
+
+ class Base2 {
+ constructor(x, y) {
+ this.x = x;
+ this.y = y;
+ }
+ }
+
+ class Subclass2 extends Base2 {};
+
+ let s2 = new Subclass2(1, 2);
+
+ assertSame(s2.__proto__, Subclass2.prototype);
+ assertSame(1, s2.x);
+ assertSame(2, s2.y);
+
+ let f = Subclass2.bind({}, 3, 4);
+ let s2prime = new f();
+ assertSame(s2prime.__proto__, Subclass2.prototype);
+ assertSame(3, s2prime.x);
+ assertSame(4, s2prime.y);
+
+ let obj = {};
+ class Base3 {
+ constructor() {
+ return obj;
+ }
+ }
+
+ class Subclass3 extends Base3 {};
+
+ let s3 = new Subclass3();
+ assertSame(obj, s3);
+
+ class ExtendedUint8Array extends Uint8Array { }
+
+ var eua = new ExtendedUint8Array(10);
+ assertEquals(10, eua.length);
+ assertEquals(10, eua.byteLength);
+ eua[0] = 0xFF;
+ eua[1] = 0xFFA;
+ assertEquals(0xFF, eua[0]);
+ assertEquals(0xFA, eua[1]);
+ assertSame(ExtendedUint8Array.prototype, eua.__proto__);
+ assertEquals("[object Uint8Array]", Object.prototype.toString.call(eua));
+}());
diff --git a/deps/v8/test/mjsunit/harmony/classes-lazy-parsing.js b/deps/v8/test/mjsunit/harmony/classes-lazy-parsing.js
new file mode 100644
index 0000000000..2c0301957a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/classes-lazy-parsing.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-classes --min-preparse-length=0
+
+'use strict';
+
+class Base {
+ m() {
+ return 42;
+ }
+}
+
+class Derived extends Base {
+ m() {
+ return super.m();
+ }
+}
+
+assertEquals(42, new Derived().m());
diff --git a/deps/v8/test/mjsunit/harmony/classes-subclass-arrays.js b/deps/v8/test/mjsunit/harmony/classes-subclass-arrays.js
new file mode 100644
index 0000000000..e0363c715b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/classes-subclass-arrays.js
@@ -0,0 +1,150 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+'use strict';
+
+(function TestDefaultConstructor() {
+ class Stack extends Array { }
+ {
+ let s1 = new Stack();
+ assertSame(Stack.prototype, s1.__proto__);
+ assertTrue(Array.isArray(s1));
+ assertSame(0, s1.length);
+ s1[0] = 'xyz';
+ assertSame(1, s1.length);
+ assertSame('xyz', s1[0]);
+ s1.push(42);
+ assertSame(2, s1.length);
+ assertSame('xyz', s1[0]);
+ assertSame(42, s1[1]);
+ }
+
+ {
+ let s2 = new Stack(10);
+ assertSame(Stack.prototype, s2.__proto__);
+ assertTrue(Array.isArray(s2));
+ assertSame(10, s2.length);
+ assertSame(undefined, s2[0]);
+ }
+
+ {
+ let a = [1,2,3];
+ let s3 = new Stack(a);
+ assertSame(Stack.prototype, s3.__proto__);
+ assertTrue(Array.isArray(s3));
+ assertSame(1, s3.length);
+ assertSame(a, s3[0]);
+ }
+
+ {
+ let s4 = new Stack(1, 2, 3);
+ assertSame(Stack.prototype, s4.__proto__);
+ assertTrue(Array.isArray(s4));
+ assertSame(3, s4.length);
+ assertSame(1, s4[0]);
+ assertSame(2, s4[1]);
+ assertSame(3, s4[2]);
+ }
+
+ {
+ let s5 = new Stack(undefined, undefined, undefined);
+ assertSame(Stack.prototype, s5.__proto__);
+ assertTrue(Array.isArray(s5));
+ assertSame(3, s5.length);
+ assertSame(undefined, s5[0]);
+ assertSame(undefined, s5[1]);
+ assertSame(undefined, s5[2]);
+ }
+}());
+
+
+(function TestEmptyArgsSuper() {
+ class Stack extends Array {
+ constructor() { super(); }
+ }
+ let s1 = new Stack();
+ assertSame(Stack.prototype, s1.__proto__);
+ assertTrue(Array.isArray(s1));
+ assertSame(0, s1.length);
+ s1[0] = 'xyz';
+ assertSame(1, s1.length);
+ assertSame('xyz', s1[0]);
+ s1.push(42);
+ assertSame(2, s1.length);
+ assertSame('xyz', s1[0]);
+ assertSame(42, s1[1]);
+}());
+
+
+(function TestOneArgSuper() {
+ class Stack extends Array {
+ constructor(x) {
+ super(x);
+ }
+ }
+
+ {
+ let s2 = new Stack(10, 'ignored arg');
+ assertSame(Stack.prototype, s2.__proto__);
+ assertTrue(Array.isArray(s2));
+ assertSame(10, s2.length);
+ assertSame(undefined, s2[0]);
+ }
+
+ {
+ let a = [1,2,3];
+ let s3 = new Stack(a, 'ignored arg');
+ assertSame(Stack.prototype, s3.__proto__);
+ assertTrue(Array.isArray(s3));
+ assertSame(1, s3.length);
+ assertSame(a, s3[0]);
+ }
+}());
+
+
+(function TestMultipleArgsSuper() {
+ class Stack extends Array {
+ constructor(x, y, z) {
+ super(x, y, z);
+ }
+ }
+ {
+ let s4 = new Stack(1, 2, 3, 4, 5);
+ assertSame(Stack.prototype, s4.__proto__);
+ assertTrue(Array.isArray(s4));
+ assertSame(3, s4.length);
+ assertSame(1, s4[0]);
+ assertSame(2, s4[1]);
+ assertSame(3, s4[2]);
+ }
+
+ {
+ let s5 = new Stack(undefined);
+ assertSame(Stack.prototype, s5.__proto__);
+ assertTrue(Array.isArray(s5));
+ assertTrue(s5.__proto__ == Stack.prototype);
+ assertSame(3, s5.length);
+ assertSame(undefined, s5[0]);
+ assertSame(undefined, s5[1]);
+ assertSame(undefined, s5[2]);
+ }
+}());
+
+
+(function TestArrayConcat() {
+ class Stack extends Array { }
+ let s1 = new Stack(1,2,3);
+
+ assertArrayEquals([1,2,3,4,5,6], s1.concat([4,5,6]));
+ assertArrayEquals([4,5,6,1,2,3], [4,5,6].concat(s1));
+}());
+
+
+(function TestJSONStringify() {
+ class Stack extends Array { }
+
+ let s1 = new Stack(1,2,3);
+ assertSame("[1,2,3]", JSON.stringify(s1));
+}());
diff --git a/deps/v8/test/mjsunit/harmony/classes.js b/deps/v8/test/mjsunit/harmony/classes.js
index 29ffbf8d7d..1b19a13193 100644
--- a/deps/v8/test/mjsunit/harmony/classes.js
+++ b/deps/v8/test/mjsunit/harmony/classes.js
@@ -139,6 +139,7 @@
class C extends B {
constructor() {
+ super();
calls++;
assertEquals(42, super.x);
}
@@ -186,7 +187,7 @@
function assertMethodDescriptor(object, name) {
var descr = Object.getOwnPropertyDescriptor(object, name);
assertTrue(descr.configurable);
- assertTrue(descr.enumerable);
+ assertFalse(descr.enumerable);
assertTrue(descr.writable);
assertEquals('function', typeof descr.value);
assertFalse('prototype' in descr.value);
@@ -196,8 +197,9 @@ function assertMethodDescriptor(object, name) {
function assertGetterDescriptor(object, name) {
var descr = Object.getOwnPropertyDescriptor(object, name);
assertTrue(descr.configurable);
- assertTrue(descr.enumerable);
+ assertFalse(descr.enumerable);
assertEquals('function', typeof descr.get);
+ assertFalse('prototype' in descr.get);
assertEquals(undefined, descr.set);
}
@@ -205,18 +207,21 @@ function assertGetterDescriptor(object, name) {
function assertSetterDescriptor(object, name) {
var descr = Object.getOwnPropertyDescriptor(object, name);
assertTrue(descr.configurable);
- assertTrue(descr.enumerable);
+ assertFalse(descr.enumerable);
assertEquals(undefined, descr.get);
assertEquals('function', typeof descr.set);
+ assertFalse('prototype' in descr.set);
}
function assertAccessorDescriptor(object, name) {
var descr = Object.getOwnPropertyDescriptor(object, name);
assertTrue(descr.configurable);
- assertTrue(descr.enumerable);
+ assertFalse(descr.enumerable);
assertEquals('function', typeof descr.get);
assertEquals('function', typeof descr.set);
+ assertFalse('prototype' in descr.get);
+ assertFalse('prototype' in descr.set);
}
@@ -609,8 +614,8 @@ function assertAccessorDescriptor(object, name) {
(function TestDefaultConstructorNoCrash() {
// Regression test for https://code.google.com/p/v8/issues/detail?id=3661
class C {}
- assertEquals(undefined, C());
- assertEquals(undefined, C(1));
+ assertThrows(function () {C();}, TypeError);
+ assertThrows(function () {C(1);}, TypeError);
assertTrue(new C() instanceof C);
assertTrue(new C(1) instanceof C);
})();
@@ -628,8 +633,8 @@ function assertAccessorDescriptor(object, name) {
assertEquals(1, calls);
calls = 0;
- Derived();
- assertEquals(1, calls);
+ assertThrows(function() { Derived(); }, TypeError);
+ assertEquals(0, calls);
})();
@@ -652,9 +657,7 @@ function assertAccessorDescriptor(object, name) {
var arr = new Array(100);
var obj = {};
- Derived.apply(obj, arr);
- assertEquals(100, args.length);
- assertEquals(obj, self);
+ assertThrows(function() {Derived.apply(obj, arr);}, TypeError);
})();
@@ -779,72 +782,73 @@ function assertAccessorDescriptor(object, name) {
})();
-(function TestSuperCallSyntacticRestriction() {
- assertThrows(function() {
- class C {
+(function TestThisAccessRestriction() {
+ class Base {}
+ (function() {
+ class C extends Base {
constructor() {
var y;
super();
}
}; new C();
- }, TypeError);
+ }());
assertThrows(function() {
- class C {
+ class C extends Base {
constructor() {
super(this.x);
}
}; new C();
- }, TypeError);
+ }, ReferenceError);
assertThrows(function() {
- class C {
+ class C extends Base {
constructor() {
super(this);
}
}; new C();
- }, TypeError);
+ }, ReferenceError);
assertThrows(function() {
- class C {
+ class C extends Base {
constructor() {
super.method();
super(this);
}
}; new C();
- }, TypeError);
+ }, ReferenceError);
assertThrows(function() {
- class C {
+ class C extends Base {
constructor() {
super(super.method());
}
}; new C();
- }, TypeError);
+ }, ReferenceError);
assertThrows(function() {
- class C {
+ class C extends Base {
constructor() {
super(super());
}
}; new C();
- }, TypeError);
+ }, ReferenceError);
assertThrows(function() {
- class C {
+ class C extends Base {
constructor() {
super(1, 2, Object.getPrototypeOf(this));
}
}; new C();
- }, TypeError);
- assertThrows(function() {
- class C {
+ }, ReferenceError);
+ (function() {
+ class C extends Base {
constructor() {
{ super(1, 2); }
}
}; new C();
- }, TypeError);
- assertThrows(function() {
- class C {
+ }());
+ (function() {
+ class C extends Base {
constructor() {
if (1) super();
}
}; new C();
- }, TypeError);
+ }());
class C1 extends Object {
constructor() {
@@ -870,10 +874,4 @@ function assertAccessorDescriptor(object, name) {
}
};
new C3();
-
- class C4 extends Object {
- constructor() {
- super(new super());
- }
- }; new C4();
}());
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js b/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js
new file mode 100644
index 0000000000..4e50f8a461
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js
@@ -0,0 +1,390 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+// Flags: --harmony-computed-property-names --harmony-classes
+
+
+function ID(x) {
+ return x;
+}
+
+
+(function TestClassMethodString() {
+ class C {
+ a() { return 'A'}
+ ['b']() { return 'B'; }
+ c() { return 'C'; }
+ [ID('d')]() { return 'D'; }
+ }
+ assertEquals('A', new C().a());
+ assertEquals('B', new C().b());
+ assertEquals('C', new C().c());
+ assertEquals('D', new C().d());
+ assertArrayEquals([], Object.keys(C.prototype));
+ assertArrayEquals(['constructor', 'a', 'b', 'c', 'd'],
+ Object.getOwnPropertyNames(C.prototype));
+})();
+
+
+(function TestClassMethodNumber() {
+ class C {
+ a() { return 'A'; }
+ [1]() { return 'B'; }
+ c() { return 'C'; }
+ [ID(2)]() { return 'D'; }
+ }
+ assertEquals('A', new C().a());
+ assertEquals('B', new C()[1]());
+ assertEquals('C', new C().c());
+ assertEquals('D', new C()[2]());
+ // Array indexes first.
+ assertArrayEquals([], Object.keys(C.prototype));
+ assertArrayEquals(['1', '2', 'constructor', 'a', 'c'],
+ Object.getOwnPropertyNames(C.prototype));
+})();
+
+
+(function TestClassMethodSymbol() {
+ var sym1 = Symbol();
+ var sym2 = Symbol();
+ class C {
+ a() { return 'A'; }
+ [sym1]() { return 'B'; }
+ c() { return 'C'; }
+ [ID(sym2)]() { return 'D'; }
+ }
+ assertEquals('A', new C().a());
+ assertEquals('B', new C()[sym1]());
+ assertEquals('C', new C().c());
+ assertEquals('D', new C()[sym2]());
+ assertArrayEquals([], Object.keys(C.prototype));
+ assertArrayEquals(['constructor', 'a', 'c'],
+ Object.getOwnPropertyNames(C.prototype));
+ assertArrayEquals([sym1, sym2], Object.getOwnPropertySymbols(C.prototype));
+})();
+
+
+
+(function TestStaticClassMethodString() {
+ class C {
+ static a() { return 'A'}
+ static ['b']() { return 'B'; }
+ static c() { return 'C'; }
+ static ['d']() { return 'D'; }
+ }
+ assertEquals('A', C.a());
+ assertEquals('B', C.b());
+ assertEquals('C', C.c());
+ assertEquals('D', C.d());
+ assertArrayEquals([], Object.keys(C));
+ // TODO(arv): It is not clear that we are adding the "standard" properties
+ // in the right order. As far as I can tell the spec adds them in alphabetical
+ // order.
+ assertArrayEquals(['length', 'name', 'arguments', 'caller', 'prototype',
+ 'a', 'b', 'c', 'd'],
+ Object.getOwnPropertyNames(C));
+})();
+
+
+(function TestStaticClassMethodNumber() {
+ class C {
+ static a() { return 'A'; }
+ static [1]() { return 'B'; }
+ static c() { return 'C'; }
+ static [2]() { return 'D'; }
+ }
+ assertEquals('A', C.a());
+ assertEquals('B', C[1]());
+ assertEquals('C', C.c());
+ assertEquals('D', C[2]());
+ // Array indexes first.
+ assertArrayEquals([], Object.keys(C));
+ assertArrayEquals(['1', '2', 'length', 'name', 'arguments', 'caller',
+ 'prototype', 'a', 'c'], Object.getOwnPropertyNames(C));
+})();
+
+
+(function TestStaticClassMethodSymbol() {
+ var sym1 = Symbol();
+ var sym2 = Symbol();
+ class C {
+ static a() { return 'A'; }
+ static [sym1]() { return 'B'; }
+ static c() { return 'C'; }
+ static [sym2]() { return 'D'; }
+ }
+ assertEquals('A', C.a());
+ assertEquals('B', C[sym1]());
+ assertEquals('C', C.c());
+ assertEquals('D', C[sym2]());
+ assertArrayEquals([], Object.keys(C));
+ assertArrayEquals(['length', 'name', 'arguments', 'caller', 'prototype',
+ 'a', 'c'],
+ Object.getOwnPropertyNames(C));
+ assertArrayEquals([sym1, sym2], Object.getOwnPropertySymbols(C));
+})();
+
+
+
+function assertIteratorResult(value, done, result) {
+ assertEquals({ value: value, done: done}, result);
+}
+
+
+(function TestGeneratorComputedName() {
+ class C {
+ *['a']() {
+ yield 1;
+ yield 2;
+ }
+ }
+ var iter = new C().a();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertArrayEquals([], Object.keys(C.prototype));
+ assertArrayEquals(['constructor', 'a'],
+ Object.getOwnPropertyNames(C.prototype));
+})();
+
+
+(function TestToNameSideEffects() {
+ var counter = 0;
+ var key1 = {
+ toString: function() {
+ assertEquals(0, counter++);
+ return 'b';
+ }
+ };
+ var key2 = {
+ toString: function() {
+ assertEquals(1, counter++);
+ return 'd';
+ }
+ };
+ class C {
+ a() { return 'A'; }
+ [key1]() { return 'B'; }
+ c() { return 'C'; }
+ [key2]() { return 'D'; }
+ }
+ assertEquals(2, counter);
+ assertEquals('A', new C().a());
+ assertEquals('B', new C().b());
+ assertEquals('C', new C().c());
+ assertEquals('D', new C().d());
+ assertArrayEquals([], Object.keys(C.prototype));
+ assertArrayEquals(['constructor', 'a', 'b', 'c', 'd'],
+ Object.getOwnPropertyNames(C.prototype));
+})();
+
+
+(function TestToNameSideEffectsNumbers() {
+ var counter = 0;
+ var key1 = {
+ valueOf: function() {
+ assertEquals(0, counter++);
+ return 1;
+ },
+ toString: null
+ };
+ var key2 = {
+ valueOf: function() {
+ assertEquals(1, counter++);
+ return 2;
+ },
+ toString: null
+ };
+
+ class C {
+ a() { return 'A'; }
+ [key1]() { return 'B'; }
+ c() { return 'C'; }
+ [key2]() { return 'D'; }
+ }
+ assertEquals(2, counter);
+ assertEquals('A', new C().a());
+ assertEquals('B', new C()[1]());
+ assertEquals('C', new C().c());
+ assertEquals('D', new C()[2]());
+ // Array indexes first.
+ assertArrayEquals([], Object.keys(C.prototype));
+ assertArrayEquals(['1', '2', 'constructor', 'a', 'c'],
+ Object.getOwnPropertyNames(C.prototype));
+})();
+
+
+(function TestGetter() {
+ class C {
+ get ['a']() {
+ return 'A';
+ }
+ }
+ assertEquals('A', new C().a);
+
+ class C2 {
+ get b() {
+ assertUnreachable();
+ }
+ get ['b']() {
+ return 'B';
+ }
+ }
+ assertEquals('B', new C2().b);
+
+ class C3 {
+ get c() {
+ assertUnreachable();
+ }
+ get ['c']() {
+ assertUnreachable();
+ }
+ get ['c']() {
+ return 'C';
+ }
+ }
+ assertEquals('C', new C3().c);
+
+ class C4 {
+ get ['d']() {
+ assertUnreachable();
+ }
+ get d() {
+ return 'D';
+ }
+ }
+ assertEquals('D', new C4().d);
+})();
+
+
+(function TestSetter() {
+ var calls = 0;
+ class C {
+ set ['a'](_) {
+ calls++;
+ }
+ }
+ new C().a = 'A';
+ assertEquals(1, calls);
+
+ calls = 0;
+ class C2 {
+ set b(_) {
+ assertUnreachable();
+ }
+ set ['b'](_) {
+ calls++;
+ }
+ }
+ new C2().b = 'B';
+ assertEquals(1, calls);
+
+ calls = 0;
+ class C3 {
+ set c(_) {
+ assertUnreachable()
+ }
+ set ['c'](_) {
+ assertUnreachable()
+ }
+ set ['c'](_) {
+ calls++
+ }
+ }
+ new C3().c = 'C';
+ assertEquals(1, calls);
+
+ calls = 0;
+ class C4 {
+ set ['d'](_) {
+ assertUnreachable()
+ }
+ set d(_) {
+ calls++
+ }
+ }
+ new C4().d = 'D';
+ assertEquals(1, calls);
+})();
+
+
+(function TestPrototype() {
+ // Normally a static prototype property is not allowed.
+ class C {
+ static ['prototype']() {
+ return 1;
+ }
+ }
+ assertEquals(1, C.prototype());
+
+ class C2 {
+ static get ['prototype']() {
+ return 2;
+ }
+ }
+ assertEquals(2, C2.prototype);
+
+ var calls = 0;
+ class C3 {
+ static set ['prototype'](x) {
+ assertEquals(3, x);
+ calls++;
+ }
+ }
+ C3.prototype = 3;
+ assertEquals(1, calls);
+
+ class C4 {
+ static *['prototype']() {
+ yield 1;
+ yield 2;
+ }
+ }
+ var iter = C4.prototype();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+})();
+
+
+(function TestConstructor() {
+ // Normally a constructor property is not allowed.
+ class C {
+ ['constructor']() {
+ return 1;
+ }
+ }
+ assertTrue(C !== C.prototype.constructor);
+ assertEquals(1, new C().constructor());
+
+ class C2 {
+ get ['constructor']() {
+ return 2;
+ }
+ }
+ assertEquals(2, new C2().constructor);
+
+ var calls = 0;
+ class C3 {
+ set ['constructor'](x) {
+ assertEquals(3, x);
+ calls++;
+ }
+ }
+ new C3().constructor = 3;
+ assertEquals(1, calls);
+
+ class C4 {
+ *['constructor']() {
+ yield 1;
+ yield 2;
+ }
+ }
+ var iter = new C4().constructor();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-object-literals-methods.js b/deps/v8/test/mjsunit/harmony/computed-property-names-object-literals-methods.js
new file mode 100644
index 0000000000..135d09854e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/computed-property-names-object-literals-methods.js
@@ -0,0 +1,121 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-computed-property-names --harmony-object-literals
+
+
+function ID(x) {
+ return x;
+}
+
+
+(function TestMethodComputedNameString() {
+ var object = {
+ a() { return 'A'},
+ ['b']() { return 'B'; },
+ c() { return 'C'; },
+ [ID('d')]() { return 'D'; },
+ };
+ assertEquals('A', object.a());
+ assertEquals('B', object.b());
+ assertEquals('C', object.c());
+ assertEquals('D', object.d());
+ assertArrayEquals(['a', 'b', 'c', 'd'], Object.keys(object));
+})();
+
+
+(function TestMethodComputedNameNumber() {
+ var object = {
+ a() { return 'A'; },
+ [1]() { return 'B'; },
+ c() { return 'C'; },
+ [ID(2)]() { return 'D'; },
+ };
+ assertEquals('A', object.a());
+ assertEquals('B', object[1]());
+ assertEquals('C', object.c());
+ assertEquals('D', object[2]());
+ // Array indexes first.
+ assertArrayEquals(['1', '2', 'a', 'c'], Object.keys(object));
+})();
+
+
+(function TestMethodComputedNameSymbol() {
+ var sym1 = Symbol();
+ var sym2 = Symbol();
+ var object = {
+ a() { return 'A'; },
+ [sym1]() { return 'B'; },
+ c() { return 'C'; },
+ [ID(sym2)]() { return 'D'; },
+ };
+ assertEquals('A', object.a());
+ assertEquals('B', object[sym1]());
+ assertEquals('C', object.c());
+ assertEquals('D', object[sym2]());
+ assertArrayEquals(['a', 'c'], Object.keys(object));
+ assertArrayEquals([sym1, sym2], Object.getOwnPropertySymbols(object));
+})();
+
+
+function assertIteratorResult(value, done, result) {
+ assertEquals({ value: value, done: done}, result);
+}
+
+
+(function TestGeneratorComputedName() {
+ var object = {
+ *['a']() {
+ yield 1;
+ yield 2;
+ }
+ };
+ var iter = object.a();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertArrayEquals(['a'], Object.keys(object));
+})();
+
+
+(function TestToNameSideEffects() {
+ var counter = 0;
+ var key1 = {
+ toString: function() {
+ assertEquals(0, counter++);
+ return 'b';
+ }
+ };
+ var key2 = {
+ toString: function() {
+ assertEquals(1, counter++);
+ return 'd';
+ }
+ };
+ var object = {
+ a() { return 'A'; },
+ [key1]() { return 'B'; },
+ c() { return 'C'; },
+ [key2]() { return 'D'; },
+ };
+ assertEquals(2, counter);
+ assertEquals('A', object.a());
+ assertEquals('B', object.b());
+ assertEquals('C', object.c());
+ assertEquals('D', object.d());
+ assertArrayEquals(['a', 'b', 'c', 'd'], Object.keys(object));
+})();
+
+
+(function TestDuplicateKeys() {
+ 'use strict';
+ // ES5 does not allow duplicate keys.
+ // ES6 does but we haven't changed our code yet.
+
+ var object = {
+ a() { return 1; },
+ ['a']() { return 2; },
+ };
+ assertEquals(2, object.a());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-super.js b/deps/v8/test/mjsunit/harmony/computed-property-names-super.js
new file mode 100644
index 0000000000..096e010317
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/computed-property-names-super.js
@@ -0,0 +1,79 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-computed-property-names --harmony-object-literals
+// Flags: --harmony-classes --allow-natives-syntax
+
+
+function ID(x) {
+ return x;
+}
+
+
+(function TestComputedMethodSuper() {
+ var proto = {
+ m() {
+ return ' proto m';
+ }
+ };
+ var object = {
+ __proto__: proto,
+ ['a']() { return 'a' + super.m(); },
+ [ID('b')]() { return 'b' + super.m(); },
+ [0]() { return '0' + super.m(); },
+ [ID(1)]() { return '1' + super.m(); },
+ };
+
+ assertSame(object, object.a[%HomeObjectSymbol()]);
+
+ assertEquals('a proto m', object.a());
+ assertEquals('b proto m', object.b());
+ assertEquals('0 proto m', object[0]());
+ assertEquals('1 proto m', object[1]());
+})();
+
+
+(function TestComputedGetterSuper() {
+ var proto = {
+ m() {
+ return ' proto m';
+ }
+ };
+ var object = {
+ __proto__: proto,
+ get ['a']() { return 'a' + super.m(); },
+ get [ID('b')]() { return 'b' + super.m(); },
+ get [0]() { return '0' + super.m(); },
+ get [ID(1)]() { return '1' + super.m(); },
+ };
+ assertEquals('a proto m', object.a);
+ assertEquals('b proto m', object.b);
+ assertEquals('0 proto m', object[0]);
+ assertEquals('1 proto m', object[1]);
+})();
+
+
+(function TestComputedSetterSuper() {
+ var value;
+ var proto = {
+ m(name, v) {
+ value = name + ' ' + v;
+ }
+ };
+ var object = {
+ __proto__: proto,
+ set ['a'](v) { super.m('a', v); },
+ set [ID('b')](v) { super.m('b', v); },
+ set [0](v) { super.m('0', v); },
+ set [ID(1)](v) { super.m('1', v); },
+ };
+ object.a = 2;
+ assertEquals('a 2', value);
+ object.b = 3;
+ assertEquals('b 3', value);
+ object[0] = 4;
+ assertEquals('0 4', value);
+ object[1] = 5;
+ assertEquals('1 5', value);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names.js b/deps/v8/test/mjsunit/harmony/computed-property-names.js
new file mode 100644
index 0000000000..69360771c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/computed-property-names.js
@@ -0,0 +1,279 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-computed-property-names
+
+
+function ID(x) {
+ return x;
+}
+
+
+
+(function TestBasicsString() {
+ var object = {
+ a: 'A',
+ ['b']: 'B',
+ c: 'C',
+ [ID('d')]: 'D',
+ };
+ assertEquals('A', object.a);
+ assertEquals('B', object.b);
+ assertEquals('C', object.c);
+ assertEquals('D', object.d);
+ assertArrayEquals(['a', 'b', 'c', 'd'], Object.keys(object));
+})();
+
+
+(function TestBasicsNumber() {
+ var object = {
+ a: 'A',
+ [1]: 'B',
+ c: 'C',
+ [ID(2)]: 'D',
+ };
+ assertEquals('A', object.a);
+ assertEquals('B', object[1]);
+ assertEquals('C', object.c);
+ assertEquals('D', object[2]);
+ // Array indexes first.
+ assertArrayEquals(['1', '2', 'a', 'c'], Object.keys(object));
+})();
+
+
+(function TestBasicsSymbol() {
+ var sym1 = Symbol();
+ var sym2 = Symbol();
+ var object = {
+ a: 'A',
+ [sym1]: 'B',
+ c: 'C',
+ [ID(sym2)]: 'D',
+ };
+ assertEquals('A', object.a);
+ assertEquals('B', object[sym1]);
+ assertEquals('C', object.c);
+ assertEquals('D', object[sym2]);
+ assertArrayEquals(['a', 'c'], Object.keys(object));
+ assertArrayEquals([sym1, sym2], Object.getOwnPropertySymbols(object));
+})();
+
+
+(function TestToNameSideEffects() {
+ var counter = 0;
+ var key1 = {
+ toString: function() {
+ assertEquals(0, counter++);
+ return 'b';
+ }
+ };
+ var key2 = {
+ toString: function() {
+ assertEquals(1, counter++);
+ return 'd';
+ }
+ };
+ var object = {
+ a: 'A',
+ [key1]: 'B',
+ c: 'C',
+ [key2]: 'D',
+ };
+ assertEquals(2, counter);
+ assertEquals('A', object.a);
+ assertEquals('B', object.b);
+ assertEquals('C', object.c);
+ assertEquals('D', object.d);
+ assertArrayEquals(['a', 'b', 'c', 'd'], Object.keys(object));
+})();
+
+
+(function TestToNameSideEffectsNumbers() {
+ var counter = 0;
+ var key1 = {
+ valueOf: function() {
+ assertEquals(0, counter++);
+ return 1;
+ },
+ toString: null
+ };
+ var key2 = {
+ valueOf: function() {
+ assertEquals(1, counter++);
+ return 2;
+ },
+ toString: null
+ };
+
+ var object = {
+ a: 'A',
+ [key1]: 'B',
+ c: 'C',
+ [key2]: 'D',
+ };
+ assertEquals(2, counter);
+ assertEquals('A', object.a);
+ assertEquals('B', object[1]);
+ assertEquals('C', object.c);
+ assertEquals('D', object[2]);
+ // Array indexes first.
+ assertArrayEquals(['1', '2', 'a', 'c'], Object.keys(object));
+})();
+
+
+(function TestDoubleName() {
+ var object = {
+ [1.2]: 'A',
+ [1e55]: 'B',
+ [0.000001]: 'C',
+ [-0]: 'D',
+ // TODO(arv): https://code.google.com/p/v8/issues/detail?id=3815
+ // [Infinity]: 'E',
+ // [-Infinity]: 'F',
+ [NaN]: 'G',
+ };
+ assertEquals('A', object['1.2']);
+ assertEquals('B', object['1e+55']);
+ assertEquals('C', object['0.000001']);
+ assertEquals('D', object[0]);
+ // TODO(arv): https://code.google.com/p/v8/issues/detail?id=3815
+ // assertEquals('E', object[Infinity]);
+ // assertEquals('F', object[-Infinity]);
+ assertEquals('G', object[NaN]);
+})();
+
+
+(function TestGetter() {
+ var object = {
+ get ['a']() {
+ return 'A';
+ }
+ };
+ assertEquals('A', object.a);
+
+ object = {
+ get b() {
+ assertUnreachable();
+ },
+ get ['b']() {
+ return 'B';
+ }
+ };
+ assertEquals('B', object.b);
+
+ object = {
+ get c() {
+ assertUnreachable();
+ },
+ get ['c']() {
+ assertUnreachable();
+ },
+ get ['c']() {
+ return 'C';
+ }
+ };
+ assertEquals('C', object.c);
+
+ object = {
+ get ['d']() {
+ assertUnreachable();
+ },
+ get d() {
+ return 'D';
+ }
+ };
+ assertEquals('D', object.d);
+})();
+
+
+(function TestSetter() {
+ var calls = 0;
+ var object = {
+ set ['a'](_) {
+ calls++;
+ }
+ };
+ object.a = 'A';
+ assertEquals(1, calls);
+
+ calls = 0;
+ object = {
+ set b(_) {
+ assertUnreachable();
+ },
+ set ['b'](_) {
+ calls++;
+ }
+ };
+ object.b = 'B';
+ assertEquals(1, calls);
+
+ calls = 0;
+ object = {
+ set c(_) {
+ assertUnreachable()
+ },
+ set ['c'](_) {
+ assertUnreachable()
+ },
+ set ['c'](_) {
+ calls++
+ }
+ };
+ object.c = 'C';
+ assertEquals(1, calls);
+
+ calls = 0;
+ object = {
+ set ['d'](_) {
+ assertUnreachable()
+ },
+ set d(_) {
+ calls++
+ }
+ };
+ object.d = 'D';
+ assertEquals(1, calls);
+})();
+
+
+(function TestDuplicateKeys() {
+ 'use strict';
+ // ES5 does not allow duplicate keys.
+ // ES6 does but we haven't changed our code yet.
+
+ var object = {
+ a: 1,
+ ['a']: 2,
+ };
+ assertEquals(2, object.a);
+})();
+
+
+(function TestProto() {
+ var proto = {};
+ var object = {
+ __proto__: proto
+ };
+ assertEquals(proto, Object.getPrototypeOf(object));
+
+ object = {
+ '__proto__': proto
+ };
+ assertEquals(proto, Object.getPrototypeOf(object));
+
+ object = {
+ ['__proto__']: proto
+ };
+ assertEquals(Object.prototype, Object.getPrototypeOf(object));
+ assertEquals(proto, object.__proto__);
+ assertTrue(object.hasOwnProperty('__proto__'));
+
+ object = {
+ [ID('x')]: 'X',
+ __proto__: proto
+ };
+ assertEquals('X', object.x);
+ assertEquals(proto, Object.getPrototypeOf(object));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/method-name-eval-arguments.js b/deps/v8/test/mjsunit/harmony/method-name-eval-arguments.js
new file mode 100644
index 0000000000..360aadbca9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/method-name-eval-arguments.js
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-object-literals
+
+(function TestSloppyMode() {
+ var o = {
+ eval() {
+ return 1;
+ },
+ arguments() {
+ return 2;
+ },
+ };
+
+ assertEquals(1, o.eval());
+ assertEquals(2, o.arguments());
+})();
+
+(function TestStrictMode() {
+ 'use strict';
+
+ var o = {
+ eval() {
+ return 1;
+ },
+ arguments() {
+ return 2;
+ },
+ };
+
+ assertEquals(1, o.eval());
+ assertEquals(2, o.arguments());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-1145.js b/deps/v8/test/mjsunit/harmony/module-parsing-eval.js
index 16d5527bbb..fa9e5ec35c 100644
--- a/deps/v8/test/mjsunit/regress/regress-1145.js
+++ b/deps/v8/test/mjsunit/harmony/module-parsing-eval.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,30 +25,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --opt-eagerly --debug-code --lazy
+// Flags: --harmony-modules
-// See: http://code.google.com/p/v8/issues/detail?id=1145
-// Should not throw a syntax error exception (change this if we make lazily
-// compiled functions with syntax errors into early errors).
-// Should not hit an assertion in debug mode.
+// Check that import/export declarations are rejected in eval or local scope.
+assertThrows("export let x;", SyntaxError);
+assertThrows("import x from 'http://url';", SyntaxError);
-// A lazily compiled function with a syntax error that is attempted inlined
-// would set a pending exception that is then ignored (until it triggers
-// an assert).
-// This file must be at least 1024 bytes long to trigger lazy compilation.
+assertThrows("{ export let x; }", SyntaxError);
+assertThrows("{ import x from 'http://url'; }", SyntaxError);
-function f() { return 1; }
+assertThrows("function f() { export let x; }", SyntaxError);
+assertThrows("function f() { import x from 'http://url'; }", SyntaxError);
-// Must be lazy. Must throw SyntaxError during compilation.
-function fail() { continue; }
-
-function opt_me() {
- var x = 1;
- // Do lots of function calls and hope to be optimized.
- for (var i = 0; i < 1000000; i++) {
- x = f();
- }
- if (x == 0) fail(); // Hope to be inlined during optimization.
-}
-
-opt_me();
+assertThrows("function f() { { export let x; } }", SyntaxError);
+assertThrows("function f() { { import x from 'http://url'; } }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/module-parsing.js b/deps/v8/test/mjsunit/harmony/module-parsing.js
deleted file mode 100644
index 8a9103d132..0000000000
--- a/deps/v8/test/mjsunit/harmony/module-parsing.js
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-modules
-
-// Test basic module syntax, with and without automatic semicolon insertion.
-
-module A {}
-
-module A1 = A
-module A2 = A;
-module A3 = A2
-
-module B {
- export vx
- export vy, lz, c, f
-
- var vx
- var vx, vy;
- var vx = 0, vy
- let lx, ly
- let lz = 1
- const c = 9
- function f() {}
-
- module C0 {}
-
- export module C {
- let x
- export module D { export let x }
- let y
- }
-
- let zz = ""
-
- export var x0
- export var x1, x2 = 6, x3
- export let y0
- export let y1 = 0, y2
- export const z0 = 0
- export const z1 = 2, z2 = 3
- export function f0() {}
- export module M1 {}
- export module M2 = C.D
- export module M3 at "http://where"
-
- import i0 from I
- import i1, i2, i3, M from I
- //import i4, i5 from "http://where"
-}
-
-module I {
- export let i0, i1, i2, i3;
- export module M {}
-}
-
-module C1 = B.C;
-module D1 = B.C.D
-module D2 = C1.D
-module D3 = D2
-
-module E1 at "http://where"
-module E2 at "http://where";
-module E3 = E1
-
-// Check that ASI does not interfere.
-
-module X
-{
-let x
-}
-
-module Y
-=
-X
-
-module Z
-at
-"file://local"
-
-import
-vx
-,
-vy
-from
-B
-
-
-module Wrap {
-export
-x
-,
-y
-
-var
-x
-,
-y
-
-export
-var
-v1 = 1
-
-export
-let
-v2 = 2
-
-export
-const
-v3 = 3
-
-export
-function
-f
-(
-)
-{
-}
-
-export
-module V
-{
-}
-}
-
-export A, A1, A2, A3, B, I, C1, D1, D2, D3, E1, E2, E3, X, Y, Z, Wrap, x, y, UU
-
-
-
-// Check that 'module' still works as an identifier.
-
-var module
-module = {}
-module["a"] = 6
-function module() {}
-function f(module) { return module }
-try {} catch (module) {}
-
-module
-v = 20
-
-
-
-// Check that module declarations are rejected in eval or local scope.
-
-module M { export let x; }
-
-assertThrows("export x;", SyntaxError); // It's using eval, so should throw.
-assertThrows("export let x;", SyntaxError);
-assertThrows("import x from M;", SyntaxError);
-assertThrows("module M {};", SyntaxError);
-
-assertThrows("{ export x; }", SyntaxError);
-assertThrows("{ export let x; }", SyntaxError);
-assertThrows("{ import x from M; }", SyntaxError);
-assertThrows("{ module M {}; }", SyntaxError);
-
-assertThrows("function f() { export x; }", SyntaxError);
-assertThrows("function f() { export let x; }", SyntaxError);
-assertThrows("function f() { import x from M; }", SyntaxError);
-assertThrows("function f() { module M {}; }", SyntaxError);
-
-assertThrows("function f() { { export x; } }", SyntaxError);
-assertThrows("function f() { { export let x; } }", SyntaxError);
-assertThrows("function f() { { import x from M; } }", SyntaxError);
-assertThrows("function f() { { module M {}; } }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/modules.js b/deps/v8/test/mjsunit/harmony/modules.js
new file mode 100644
index 0000000000..e56880500b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export let a = 42;
+assertEquals(42, a);
diff --git a/deps/v8/test/mjsunit/harmony/object-literals-method.js b/deps/v8/test/mjsunit/harmony/object-literals-method.js
index 71f44d10bc..605e269fcb 100644
--- a/deps/v8/test/mjsunit/harmony/object-literals-method.js
+++ b/deps/v8/test/mjsunit/harmony/object-literals-method.js
@@ -246,3 +246,27 @@ function assertIteratorResult(value, done, result) {
};
assertEquals('*method() { yield 1; }', object.method.toString());
})();
+
+
+(function TestProtoName() {
+ var object = {
+ __proto__() {
+ return 1;
+ }
+ };
+ assertEquals(Object.prototype, Object.getPrototypeOf(object));
+ assertEquals(1, object.__proto__());
+})();
+
+
+(function TestProtoName2() {
+ var p = {};
+ var object = {
+ __proto__() {
+ return 1;
+ },
+ __proto__: p
+ };
+ assertEquals(p, Object.getPrototypeOf(object));
+ assertEquals(1, object.__proto__());
+})();
diff --git a/deps/v8/test/mjsunit/harmony/object-literals-property-shorthand.js b/deps/v8/test/mjsunit/harmony/object-literals-property-shorthand.js
index 2921495d89..9756da46c1 100644
--- a/deps/v8/test/mjsunit/harmony/object-literals-property-shorthand.js
+++ b/deps/v8/test/mjsunit/harmony/object-literals-property-shorthand.js
@@ -49,3 +49,25 @@
function f(x) { return {x}; }
assertEquals('function f(x) { return {x}; }', f.toString());
})();
+
+
+(function TestProtoName() {
+ var __proto__ = 1;
+ var object = {
+ __proto__
+ };
+ assertEquals(Object.prototype, Object.getPrototypeOf(object));
+ assertEquals(1, object.__proto__);
+})();
+
+
+(function TestProtoName2() {
+ var __proto__ = 1;
+ var p = {};
+ var object = {
+ __proto__: p,
+ __proto__,
+ };
+ assertEquals(p, Object.getPrototypeOf(object));
+ assertEquals(1, object.__proto__);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/object-literals-super.js b/deps/v8/test/mjsunit/harmony/object-literals-super.js
index ec22b8a8a3..c2d456c877 100644
--- a/deps/v8/test/mjsunit/harmony/object-literals-super.js
+++ b/deps/v8/test/mjsunit/harmony/object-literals-super.js
@@ -22,15 +22,6 @@
set accessor(v) {
super.accessor = v;
},
- property: function() {
- super.property();
- },
- propertyWithParen: (function() {
- super.property();
- }),
- propertyWithParens: ((function() {
- super.property();
- })),
methodNoSuper() {},
get getterNoSuper() {},
@@ -50,9 +41,6 @@
desc = Object.getOwnPropertyDescriptor(object, 'accessor');
assertEquals(object, desc.get[%HomeObjectSymbol()]);
assertEquals(object, desc.set[%HomeObjectSymbol()]);
- assertEquals(object, object.property[%HomeObjectSymbol()]);
- assertEquals(object, object.propertyWithParen[%HomeObjectSymbol()]);
- assertEquals(object, object.propertyWithParens[%HomeObjectSymbol()]);
assertEquals(undefined, object.methodNoSuper[%HomeObjectSymbol()]);
desc = Object.getOwnPropertyDescriptor(object, 'getterNoSuper');
@@ -118,21 +106,6 @@
})();
-(function TestMethodAsProperty() {
- var object = {
- __proto__: {
- method: function(x) {
- return 'proto' + x;
- }
- },
- method: function(x) {
- return super.method(x);
- }
- };
- assertEquals('proto42', object.method(42));
-})();
-
-
(function TestOptimized() {
// Object literals without any accessors get optimized.
var object = {
@@ -154,15 +127,7 @@
*g() {
yield super.m();
},
- g2: function*() {
- yield super.m() + 1;
- },
- g3: (function*() {
- yield super.m() + 2;
- })
};
assertEquals(42, o.g().next().value);
- assertEquals(43, o.g2().next().value);
- assertEquals(44, o.g3().next().value);
})();
diff --git a/deps/v8/test/mjsunit/harmony/regexp-flags.js b/deps/v8/test/mjsunit/harmony/regexp-flags.js
index 475fda493c..fae88610e4 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-flags.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-flags.js
@@ -2,19 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexps
+// Flags: --harmony-regexps --harmony-unicode-regexps
RegExp.prototype.flags = 'setter should be undefined';
assertEquals('', RegExp('').flags);
assertEquals('', /./.flags);
-assertEquals('gimy', RegExp('', 'ygmi').flags);
-assertEquals('gimy', /foo/ymig.flags);
-
-// TODO(dslomov): When support for the `u` flag is added, uncomment the first
-// line below and remove the second line.
-//assertEquals(RegExp('', 'yumig').flags, 'gimuy');
-assertThrows(function() { RegExp('', 'yumig').flags; }, SyntaxError);
+assertEquals('gimuy', RegExp('', 'yugmi').flags);
+assertEquals('gimuy', /foo/yumig.flags);
var descriptor = Object.getOwnPropertyDescriptor(RegExp.prototype, 'flags');
assertTrue(descriptor.configurable);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-455141.js b/deps/v8/test/mjsunit/harmony/regress/regress-455141.js
new file mode 100644
index 0000000000..cf2141f903
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-455141.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes --no-lazy
+"use strict";
+class Base {
+}
+class Subclass extends Base {
+ constructor() {
+ this.prp1 = 3;
+ }
+}
+function __f_1(){
+}
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-typedarray-out-of-bounds.js b/deps/v8/test/mjsunit/harmony/regress/regress-typedarray-out-of-bounds.js
new file mode 100644
index 0000000000..4db280aac9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-typedarray-out-of-bounds.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = new Int32Array(10);
+function f(a) { a["-1"] = 15; }
+for (var i = 0; i < 3; i++) {
+ f(a);
+}
+assertEquals(undefined, a[-1]);
diff --git a/deps/v8/test/mjsunit/harmony/rest-params-lazy-parsing.js b/deps/v8/test/mjsunit/harmony/rest-params-lazy-parsing.js
new file mode 100644
index 0000000000..ba8e3008b9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/rest-params-lazy-parsing.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rest-parameters --min-preparse-length=0
+
+function variadic(co, ...values) {
+ var sum = 0;
+ while (values.length) {
+ sum += co * values.pop();
+ }
+ return sum;
+}
+
+assertEquals(90, variadic(2, 1, 2, 3, 4, 5, 6, 7, 8, 9));
+assertEquals(74, variadic(2, 1, 2, 3, 4, 5, 6, 7, 9));
+assertEquals(110, variadic(2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
diff --git a/deps/v8/test/mjsunit/harmony/rest-params.js b/deps/v8/test/mjsunit/harmony/rest-params.js
new file mode 100644
index 0000000000..5bb258ee68
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/rest-params.js
@@ -0,0 +1,182 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rest-parameters
+
+(function testRestIndex() {
+ assertEquals(5, (function(...args) { return args.length; })(1,2,3,4,5));
+ assertEquals(4, (function(a, ...args) { return args.length; })(1,2,3,4,5));
+ assertEquals(3, (function(a, b, ...args) { return args.length; })(1,2,3,4,5));
+ assertEquals(2, (function(a, b, c, ...args) {
+ return args.length; })(1,2,3,4,5));
+ assertEquals(1, (function(a, b, c, d, ...args) {
+ return args.length; })(1,2,3,4,5));
+ assertEquals(0, (function(a, b, c, d, e, ...args) {
+ return args.length; })(1,2,3,4,5));
+})();
+
+function strictTest(a, b, ...c) {
+ "use strict";
+ assertEquals(Array, c.constructor);
+ assertTrue(Array.isArray(c));
+
+ var expectedLength = arguments.length >= 3 ? arguments.length - 2 : 0;
+ assertEquals(expectedLength, c.length);
+
+ for (var i = 2, j = 0; i < arguments.length; ++i) {
+ assertEquals(c[j++], arguments[i]);
+ }
+}
+
+function sloppyTest(a, b, ...c) {
+ assertEquals(Array, c.constructor);
+ assertTrue(Array.isArray(c));
+
+ var expectedLength = arguments.length >= 3 ? arguments.length - 2 : 0;
+ assertEquals(expectedLength, c.length);
+
+ for (var i = 2, j = 0; i < arguments.length; ++i) {
+ assertEquals(c[j++], arguments[i]);
+ }
+}
+
+
+var O = {
+ strict: strictTest,
+ sloppy: sloppyTest
+};
+
+(function testStrictRestParamArity() {
+ assertEquals(2, strictTest.length);
+ assertEquals(2, O.strict.length);
+})();
+
+
+(function testRestParamsStrictMode() {
+ strictTest();
+ strictTest(1, 2);
+ strictTest(1, 2, 3, 4, 5, 6);
+ strictTest(1, 2, 3);
+ O.strict();
+ O.strict(1, 2);
+ O.strict(1, 2, 3, 4, 5, 6);
+ O.strict(1, 2, 3);
+})();
+
+
+(function testRestParamsStrictModeApply() {
+ strictTest.apply(null, []);
+ strictTest.apply(null, [1, 2]);
+ strictTest.apply(null, [1, 2, 3, 4, 5, 6]);
+ strictTest.apply(null, [1, 2, 3]);
+ O.strict.apply(O, []);
+ O.strict.apply(O, [1, 2]);
+ O.strict.apply(O, [1, 2, 3, 4, 5, 6]);
+ O.strict.apply(O, [1, 2, 3]);
+})();
+
+
+(function testRestParamsStrictModeCall() {
+ strictTest.call(null);
+ strictTest.call(null, 1, 2);
+ strictTest.call(null, 1, 2, 3, 4, 5, 6);
+ strictTest.call(null, 1, 2, 3);
+ O.strict.call(O);
+ O.strict.call(O, 1, 2);
+ O.strict.call(O, 1, 2, 3, 4, 5, 6);
+ O.strict.call(O, 1, 2, 3);
+})();
+
+
+(function testsloppyRestParamArity() {
+ assertEquals(2, sloppyTest.length);
+ assertEquals(2, O.sloppy.length);
+})();
+
+
+(function testRestParamssloppyMode() {
+ sloppyTest();
+ sloppyTest(1, 2);
+ sloppyTest(1, 2, 3, 4, 5, 6);
+ sloppyTest(1, 2, 3);
+ O.sloppy();
+ O.sloppy(1, 2);
+ O.sloppy(1, 2, 3, 4, 5, 6);
+ O.sloppy(1, 2, 3);
+})();
+
+
+(function testRestParamssloppyModeApply() {
+ sloppyTest.apply(null, []);
+ sloppyTest.apply(null, [1, 2]);
+ sloppyTest.apply(null, [1, 2, 3, 4, 5, 6]);
+ sloppyTest.apply(null, [1, 2, 3]);
+ O.sloppy.apply(O, []);
+ O.sloppy.apply(O, [1, 2]);
+ O.sloppy.apply(O, [1, 2, 3, 4, 5, 6]);
+ O.sloppy.apply(O, [1, 2, 3]);
+})();
+
+
+(function testRestParamssloppyModeCall() {
+ sloppyTest.call(null);
+ sloppyTest.call(null, 1, 2);
+ sloppyTest.call(null, 1, 2, 3, 4, 5, 6);
+ sloppyTest.call(null, 1, 2, 3);
+ O.sloppy.call(O);
+ O.sloppy.call(O, 1, 2);
+ O.sloppy.call(O, 1, 2, 3, 4, 5, 6);
+ O.sloppy.call(O, 1, 2, 3);
+})();
+
+
+(function testUnmappedArguments() {
+ // Strict/Unmapped arguments should always be used for functions with rest
+ // parameters
+ assertThrows(function(...rest) { return arguments.caller; }, TypeError);
+ assertThrows(function(...rest) { return arguments.callee; }, TypeError);
+ // TODO(caitp): figure out why this doesn't throw sometimes, even though the
+ // getter always does =)
+ // assertThrows(function(...rest) { arguments.caller = 1; }, TypeError);
+ // assertThrows(function(...rest) { arguments.callee = 1; }, TypeError);
+})();
+
+
+(function testNoAliasArgumentsStrict() {
+ function strictF(a, ...rest) {
+ "use strict";
+ arguments[0] = 1;
+ assertEquals(3, a);
+ arguments[1] = 2;
+ assertArrayEquals([4, 5], rest);
+ }
+ strictF(3, 4, 5);
+})();
+
+
+(function testNoAliasArgumentsSloppy() {
+ function sloppyF(a, ...rest) {
+ arguments[0] = 1;
+ assertEquals(3, a);
+ arguments[1] = 2;
+ assertArrayEquals([4, 5], rest);
+ }
+ sloppyF(3, 4, 5);
+})();
+
+
+/* TODO(caitp): support arrow functions (blocked on spread operator support)
+(function testRestParamsArrowFunctions() {
+ "use strict";
+
+ var fn = (a, b, ...c) => c;
+ assertEquals([], fn());
+ assertEquals([], fn(1, 2));
+ assertEquals([3], fn(1, 2, 3));
+ assertEquals([3, 4], fn(1, 2, 3, 4));
+ assertEquals([3, 4, 5], fn(1, 2, 3, 4, 5));
+ assertThrows("var x = ...y => y;", SyntaxError);
+ assertEquals([], ((...args) => args)());
+ assertEquals([1,2,3], ((...args) => args)(1,2,3));
+})();*/
diff --git a/deps/v8/test/mjsunit/harmony/super.js b/deps/v8/test/mjsunit/harmony/super.js
index 6dcc393cce..988cef22e2 100644
--- a/deps/v8/test/mjsunit/harmony/super.js
+++ b/deps/v8/test/mjsunit/harmony/super.js
@@ -1,36 +1,38 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-classes
+// Flags: --harmony-classes --allow-natives-syntax
(function TestSuperNamedLoads() {
function Base() { }
+ function fBase() { }
+ Base.prototype = {
+ f() {
+ return "Base " + this.toString();
+ },
+ x: 15,
+ toString() {
+ return "this is Base";
+ }
+ };
+
function Derived() {
this.derivedDataProperty = "xxx";
}
- Derived.prototype = Object.create(Base.prototype);
-
- function fBase() { return "Base " + this.toString(); }
-
- Base.prototype.f = fBase.toMethod(Base.prototype);
-
- function fDerived() {
- assertEquals("Base this is Derived", super.f());
- var a = super.x;
- assertEquals(15, a);
- assertEquals(15, super.x);
- assertEquals(27, this.x);
-
- return "Derived"
- }
-
- Base.prototype.x = 15;
- Base.prototype.toString = function() { return "this is Base"; };
- Derived.prototype.toString = function() { return "this is Derived"; };
- Derived.prototype.x = 27;
- Derived.prototype.f = fDerived.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ toString() { return "this is Derived"; },
+ x: 27,
+ f() {
+ assertEquals("Base this is Derived", super.f());
+ var a = super.x;
+ assertEquals(15, a);
+ assertEquals(15, super.x);
+ assertEquals(27, this.x);
+ return "Derived";
+ }
+ };
assertEquals("Base this is Base", new Base().f());
assertEquals("Derived", new Derived().f());
@@ -38,34 +40,39 @@
(function TestSuperKeyedLoads() {
+ 'use strict';
+
var x = 'x';
var derivedDataProperty = 'derivedDataProperty';
var f = 'f';
- function Base() { }
- function Derived() {
- this[derivedDataProperty] = 'xxx';
- }
- Derived.prototype = Object.create(Base.prototype);
-
- function fBase() { return "Base " + this.toString(); }
-
- Base.prototype[f] = fBase.toMethod(Base.prototype);
-
- function fDerived() {
- assertEquals("Base this is Derived", super[f]());
- var a = super[x];
- assertEquals(15, a);
- assertEquals(15, super[x]);
- assertEquals(27, this[x]);
- return "Derived"
+ class Base {
+ f() {
+ return "Base " + this.toString();
+ }
+ toString() {
+ return "this is Base";
+ }
}
Base.prototype[x] = 15;
- Base.prototype.toString = function() { return "this is Base"; };
- Derived.prototype.toString = function() { return "this is Derived"; };
- Derived.prototype[x] = 27;
- Derived.prototype[f] = fDerived.toMethod(Derived.prototype);
+
+ function Derived() {
+ this[derivedDataProperty] = "xxx";
+ }
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ toString() { return "this is Derived"; },
+ x: 27,
+ f() {
+ assertEquals("Base this is Derived", super[f]());
+ var a = super[x];
+ assertEquals(15, a);
+ assertEquals(15, super[x]);
+ assertEquals(27, this[x]);
+ return "Derived";
+ }
+ };
assertEquals("Base this is Base", new Base().f());
assertEquals("Derived", new Derived().f());
@@ -76,31 +83,29 @@
var x = 1;
var derivedDataProperty = 2;
var f = 3;
- function Base() { }
- function Derived() {
- this[derivedDataProperty] = 'xxx';
- }
- Derived.prototype = Object.create(Base.prototype);
+ function Base() { }
function fBase() { return "Base " + this.toString(); }
-
- Base.prototype[f] = fBase.toMethod(Base.prototype);
-
- function fDerived() {
- assertEquals("Base this is Derived", super[f]());
- var a = super[x];
- assertEquals(15, a);
- assertEquals(15, super[x]);
- assertEquals(27, this[x]);
-
- return "Derived"
- }
-
+ Base.prototype[f] = %ToMethod(fBase, Base.prototype);
Base.prototype[x] = 15;
Base.prototype.toString = function() { return "this is Base"; };
- Derived.prototype.toString = function() { return "this is Derived"; };
- Derived.prototype[x] = 27;
- Derived.prototype[f] = fDerived.toMethod(Derived.prototype);
+
+ function Derived() {
+ this[derivedDataProperty] = "xxx";
+ }
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ toString() { return "this is Derived"; },
+ 1: 27,
+ 3() {
+ assertEquals("Base this is Derived", super[f]());
+ var a = super[x];
+ assertEquals(15, a);
+ assertEquals(15, super[x]);
+ assertEquals(27, this[x]);
+ return "Derived";
+ }
+ };
assertEquals("Base this is Base", new Base()[f]());
assertEquals("Derived", new Derived()[f]());
@@ -108,11 +113,17 @@
(function TestSuperKeywordNonMethod() {
- function f() {
- super.unknown();
+ 'use strict';
+
+ class C {
+ f() {
+ super.unknown();
+ }
}
- assertThrows(f, ReferenceError);
+ assertThrows(function() {
+ new C().f();
+ }, TypeError);
}());
@@ -133,15 +144,16 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 'derived'
+ _x: 'derived',
+ testGetter() {
+ return super.x;
+ },
+ testGetterStrict() {
+ 'use strict';
+ return super.x;
+ }
};
- Derived.prototype.testGetter = function() {
- return super.x;
- }.toMethod(Derived.prototype);
- Derived.prototype.testGetterStrict = function() {
- 'use strict';
- return super.x;
- }.toMethod(Derived.prototype);
+
derived = new Derived();
assertEquals('derived', derived.testGetter());
derived = new Derived();
@@ -167,44 +179,45 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 'derived'
+ _x: 'derived',
+ testGetter() {
+ return super[x];
+ },
+ testGetterStrict() {
+ 'use strict';
+ return super[x];
+ },
+ testGetterWithToString() {
+ var toStringCalled;
+ var o = { toString: function() {
+ toStringCalled++;
+ return 'x';
+ } };
+
+ toStringCalled = 0;
+ assertEquals('derived', super[o]);
+ assertEquals(1, toStringCalled);
+
+ var eToThrow = new Error();
+ var oThrowsInToString = { toString: function() {
+ throw eToThrow;
+ } };
+
+ var ex = null;
+ try {
+ super[oThrowsInToString];
+ } catch(e) { ex = e }
+ assertEquals(eToThrow, ex);
+
+ var oReturnsNumericString = { toString: function() {
+ return "1";
+ } };
+
+ assertEquals(undefined, super[oReturnsNumericString]);
+ assertEquals(undefined, super[1]);
+ }
};
- Derived.prototype.testGetter = function() {
- return super[x];
- }.toMethod(Derived.prototype);
- Derived.prototype.testGetterStrict = function() {
- 'use strict';
- return super[x];
- }.toMethod(Derived.prototype);
- Derived.prototype.testGetterWithToString = function() {
- var toStringCalled;
- var o = { toString: function() {
- toStringCalled++;
- return 'x';
- } };
-
- toStringCalled = 0;
- assertEquals('derived', super[o]);
- assertEquals(1, toStringCalled);
-
- var eToThrow = new Error();
- var oThrowsInToString = { toString: function() {
- throw eToThrow;
- } };
-
- var ex = null;
- try {
- super[oThrowsInToString];
- } catch(e) { ex = e }
- assertEquals(eToThrow, ex);
-
- var oReturnsNumericString = { toString: function() {
- return "1";
- } };
-
- assertEquals(undefined, super[oReturnsNumericString]);
- assertEquals(undefined, super[1]);
- }.toMethod(Derived.prototype);
+
derived = new Derived();
assertEquals('derived', derived.testGetter());
derived = new Derived();
@@ -233,45 +246,51 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 'derived'
+ _x: 'derived',
+ testGetter() {
+ return super[x];
+ },
+ testGetterStrict() {
+ 'use strict';
+ return super[x];
+ },
+ testGetterWithToString() {
+ var toStringCalled;
+ var o = {
+ toString: function() {
+ toStringCalled++;
+ return '42';
+ }
+ };
+
+ toStringCalled = 0;
+ assertEquals('derived', super[o]);
+ assertEquals(1, toStringCalled);
+
+ var eToThrow = new Error();
+ var oThrowsInToString = {
+ toString: function() {
+ throw eToThrow;
+ }
+ };
+
+ var ex = null;
+ try {
+ super[oThrowsInToString];
+ } catch(e) { ex = e }
+ assertEquals(eToThrow, ex);
+
+ var oReturnsNumericString = {
+ toString: function() {
+ return "42";
+ }
+ };
+
+ assertEquals('derived', super[oReturnsNumericString]);
+ assertEquals('derived', super[42]);
+ }
};
- Derived.prototype.testGetter = function() {
- return super[x];
- }.toMethod(Derived.prototype);
- Derived.prototype.testGetterStrict = function() {
- 'use strict';
- return super[x];
- }.toMethod(Derived.prototype);
-
- Derived.prototype.testGetterWithToString = function() {
- var toStringCalled;
- var o = { toString: function() {
- toStringCalled++;
- return '42';
- } };
-
- toStringCalled = 0;
- assertEquals('derived', super[o]);
- assertEquals(1, toStringCalled);
-
- var eToThrow = new Error();
- var oThrowsInToString = { toString: function() {
- throw eToThrow;
- } };
-
- var ex = null;
- try {
- super[oThrowsInToString];
- } catch(e) { ex = e }
- assertEquals(eToThrow, ex);
-
- var oReturnsNumericString = { toString: function() {
- return "42";
- } };
-
- assertEquals('derived', super[oReturnsNumericString]);
- assertEquals('derived', super[42]);
- }.toMethod(Derived.prototype);
+
derived = new Derived();
assertEquals('derived', derived.testGetter());
derived = new Derived();
@@ -299,22 +318,24 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 'derived'
+ _x: 'derived',
+ testSetter() {
+ assertEquals('foobar', super.x = 'foobar');
+ assertEquals('foobarabc', super.x += 'abc');
+ },
+ testSetterStrict() {
+ 'use strict';
+ assertEquals('foobar', super.x = 'foobar');
+ assertEquals('foobarabc', super.x += 'abc');
+ }
};
- Derived.prototype.testSetter = function() {
- assertEquals('foobar', super.x = 'foobar');
- assertEquals('foobarabc', super.x += 'abc');
- }.toMethod(Derived.prototype);
+
var d = new Derived();
d.testSetter();
assertEquals('base', Base.prototype._x);
assertEquals('foobarabc', d._x);
d._x = '';
- Derived.prototype.testSetterStrict = function() {
- 'use strict';
- assertEquals('foobar', super.x = 'foobar');
- assertEquals('foobarabc', super.x += 'abc');
- }.toMethod(Derived.prototype);
+
d.testSetterStrict();
assertEquals('base', Base.prototype._x);
assertEquals('foobarabc', d._x);
@@ -339,51 +360,56 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 'derived'
+ _x: 'derived',
+ testSetter() {
+ assertEquals('foobar', super[x] = 'foobar');
+ assertEquals('foobarabc', super[x] += 'abc');
+ },
+ testSetterStrict() {
+ 'use strict';
+ assertEquals('foobar', super[x] = 'foobar');
+ assertEquals('foobarabc', super[x] += 'abc');
+ },
+ testSetterWithToString() {
+ var toStringCalled;
+ var o = {
+ toString: function() {
+ toStringCalled++;
+ return x;
+ }
+ };
+
+ toStringCalled = 0;
+ super[o] = 'set';
+ assertEquals(1, toStringCalled);
+ assertEquals('set', this._x);
+
+ var eToThrow = new Error();
+ var oThrowsInToString = {
+ toString: function() {
+ throw eToThrow;
+ }
+ };
+
+ var ex = null;
+ try {
+ super[oThrowsInToString] = 'xyz';
+ } catch(e) { ex = e }
+ assertEquals(eToThrow, ex);
+ assertEquals('set', this._x);
+ }
};
- Derived.prototype.testSetter = function() {
- assertEquals('foobar', super[x] = 'foobar');
- assertEquals('foobarabc', super[x] += 'abc');
- }.toMethod(Derived.prototype);
+
var d = new Derived();
d.testSetter();
assertEquals('base', Base.prototype._x);
assertEquals('foobarabc', d._x);
d._x = '';
- Derived.prototype.testSetterStrict = function() {
- 'use strict';
- assertEquals('foobar', super[x] = 'foobar');
- assertEquals('foobarabc', super[x] += 'abc');
- }.toMethod(Derived.prototype);
+
d.testSetterStrict();
assertEquals('base', Base.prototype._x);
assertEquals('foobarabc', d._x);
-
- Derived.prototype.testSetterWithToString = function() {
- var toStringCalled;
- var o = { toString: function() {
- toStringCalled++;
- return x;
- } };
-
- toStringCalled = 0;
- super[o] = 'set';
- assertEquals(1, toStringCalled);
- assertEquals('set', this._x);
-
- var eToThrow = new Error();
- var oThrowsInToString = { toString: function() {
- throw eToThrow;
- } };
-
- var ex = null;
- try {
- super[oThrowsInToString] = 'xyz';
- } catch(e) { ex = e }
- assertEquals(eToThrow, ex);
- assertEquals('set', this._x);
- }.toMethod(Derived.prototype);
d = new Derived();
d.testSetterWithToString();
}());
@@ -408,61 +434,67 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 'derived'
+ _x: 'derived',
+ testSetter() {
+ assertEquals('foobar', super[x] = 'foobar');
+ assertEquals('foobarabc', super[x] += 'abc');
+ },
+ testSetterStrict() {
+ 'use strict';
+ assertEquals('foobar', super[x] = 'foobar');
+ assertEquals('foobarabc', super[x] += 'abc');
+ },
+ testSetterWithToString() {
+ var toStringCalled;
+ var o = {
+ toString: function() {
+ toStringCalled++;
+ return 'x';
+ }
+ };
+
+ toStringCalled = 0;
+ super[o] = 'set';
+ assertEquals(1, toStringCalled);
+ assertEquals('set', this._x);
+
+ var eToThrow = new Error();
+ var oThrowsInToString = {
+ toString: function() {
+ throw eToThrow;
+ }
+ };
+
+ var ex = null;
+ try {
+ super[oThrowsInToString] = 'xyz';
+ } catch(e) { ex = e }
+ assertEquals(eToThrow, ex);
+ assertEquals('set', this._x);
+
+ var oReturnsNumericString = {
+ toString: function() {
+ return "1";
+ }
+ };
+
+ assertEquals('abc', super[oReturnsNumericString] = 'abc');
+
+ assertEquals('set', this._x);
+
+ assertEquals(10, super[1] = 10);
+ }
};
- Derived.prototype.testSetter = function() {
- assertEquals('foobar', super[x] = 'foobar');
- assertEquals('foobarabc', super[x] += 'abc');
- }.toMethod(Derived.prototype);
+
var d = new Derived();
d.testSetter();
assertEquals('base', Base.prototype._x);
assertEquals('foobarabc', d._x);
d._x = '';
- Derived.prototype.testSetterStrict = function() {
- 'use strict';
- assertEquals('foobar', super[x] = 'foobar');
- assertEquals('foobarabc', super[x] += 'abc');
- }.toMethod(Derived.prototype);
d.testSetterStrict();
assertEquals('base', Base.prototype._x);
assertEquals('foobarabc', d._x);
-
- Derived.prototype.testSetterWithToString = function() {
- var toStringCalled;
- var o = { toString: function() {
- toStringCalled++;
- return 'x';
- } };
-
- toStringCalled = 0;
- super[o] = 'set';
- assertEquals(1, toStringCalled);
- assertEquals('set', this._x);
-
- var eToThrow = new Error();
- var oThrowsInToString = { toString: function() {
- throw eToThrow;
- } };
-
- var ex = null;
- try {
- super[oThrowsInToString] = 'xyz';
- } catch(e) { ex = e }
- assertEquals(eToThrow, ex);
- assertEquals('set', this._x);
-
- var oReturnsNumericString = { toString: function() {
- return "1";
- } };
-
- assertEquals('abc', super[oReturnsNumericString] = 'abc');
-
- assertEquals('set', this._x);
-
- assertEquals(10, super[1] = 10);
- }.toMethod(Derived.prototype);
d = new Derived();
d.testSetterWithToString();
}());
@@ -479,15 +511,14 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
+ testSetter() {
+ assertEquals('x from Base', super.x);
+ super.x = 'data property';
+ assertEquals('x from Base', super.x);
+ assertEquals('data property', this.x);
+ }
};
- Derived.prototype.testSetter = function() {
- assertEquals('x from Base', super.x);
- super.x = 'data property';
- assertEquals('x from Base', super.x);
- assertEquals('data property', this.x);
- }.toMethod(Derived.prototype);
-
new Derived().testSetter();
}());
@@ -504,15 +535,14 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
+ testSetter() {
+ assertEquals('x from Base', super[x]);
+ super[x] = 'data property';
+ assertEquals('x from Base', super[x]);
+ assertEquals('data property', this[x]);
+ }
};
- Derived.prototype.testSetter = function() {
- assertEquals('x from Base', super[x]);
- super[x] = 'data property';
- assertEquals('x from Base', super[x]);
- assertEquals('data property', this[x]);
- }.toMethod(Derived.prototype);
-
new Derived().testSetter();
}());
@@ -529,15 +559,14 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
+ testSetter() {
+ assertEquals('x from Base', super[x]);
+ super[x] = 'data property';
+ assertEquals('x from Base', super[x]);
+ assertEquals('data property', this[x]);
+ }
};
- Derived.prototype.testSetter = function() {
- assertEquals('x from Base', super[x]);
- super[x] = 'data property';
- assertEquals('x from Base', super[x]);
- assertEquals('data property', this[x]);
- }.toMethod(Derived.prototype);
-
new Derived().testSetter();
}());
@@ -562,73 +591,74 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- };
- Derived.prototype.testSetter = function() {
- setCalled = 0;
- getCalled = 0;
- assertEquals('object', typeof this);
- assertTrue(this instanceof Number)
- assertEquals(42, this.valueOf());
- assertEquals(1, super.x);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- assertEquals(5, super.x = 5);
- assertEquals(1, getCalled);
- assertEquals(1, setCalled);
-
- assertEquals(6, super.x += 5);
- assertEquals(2, getCalled);
- assertEquals(2, setCalled);
-
- super.newProperty = 15;
- assertEquals(15, this.newProperty);
- assertEquals(undefined, super.newProperty);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.testSetterStrict = function() {
- 'use strict';
- getCalled = 0;
- setCalled = 0;
- assertTrue(42 === this);
-
- assertEquals(1, super.x);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- assertEquals(5, super.x = 5);
- assertEquals(1, getCalled);
- assertEquals(1, setCalled);
-
- assertEquals(6, super.x += 5);
- assertEquals(2, getCalled);
- assertEquals(2, setCalled);
-
- var ex;
- try {
+ testSetter() {
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals('object', typeof this);
+ assertInstanceof(this, Number)
+ assertEquals(42, this.valueOf());
+ assertEquals(1, super.x);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ assertEquals(5, super.x = 5);
+ assertEquals(1, getCalled);
+ assertEquals(1, setCalled);
+
+ assertEquals(6, super.x += 5);
+ assertEquals(2, getCalled);
+ assertEquals(2, setCalled);
+
super.newProperty = 15;
- } catch (e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- }.toMethod(Derived.prototype);
+ assertEquals(15, this.newProperty);
+ assertEquals(undefined, super.newProperty);
+ },
+ testSetterStrict() {
+ 'use strict';
+ getCalled = 0;
+ setCalled = 0;
+ assertTrue(42 === this);
+
+ assertEquals(1, super.x);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ assertEquals(5, super.x = 5);
+ assertEquals(1, getCalled);
+ assertEquals(1, setCalled);
+
+ assertEquals(6, super.x += 5);
+ assertEquals(2, getCalled);
+ assertEquals(2, setCalled);
+
+ var ex;
+ try {
+ super.newProperty = 15;
+ } catch (e) { ex = e; }
+ assertInstanceof(ex, TypeError);
+ }
+ }
Derived.prototype.testSetter.call(42);
Derived.prototype.testSetterStrict.call(42);
function DerivedFromString() {}
- DerivedFromString.prototype = Object.create(String.prototype);
-
- function f() {
- 'use strict';
- assertTrue(42 === this);
- assertEquals(String.prototype.toString, super.toString);
- var ex;
- try {
- super.toString();
- } catch(e) { ex = e; }
-
- assertTrue(ex instanceof TypeError);
- }
- f.toMethod(DerivedFromString.prototype).call(42);
+ DerivedFromString.prototype = {
+ __proto__: String.prototype,
+ f() {
+ 'use strict';
+ assertTrue(42 === this);
+ assertEquals(String.prototype.toString, super.toString);
+ var ex;
+ try {
+ super.toString();
+ } catch(e) { ex = e; }
+
+ assertInstanceof(ex, TypeError);
+ }
+ };
+
+ DerivedFromString.prototype.f.call(42);
}());
@@ -655,73 +685,73 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- };
- Derived.prototype.testSetter = function() {
- setCalled = 0;
- getCalled = 0;
- assertEquals('object', typeof this);
- assertTrue(this instanceof Number)
- assertEquals(42, this.valueOf());
- assertEquals(1, super[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- assertEquals(5, super[x] = 5);
- assertEquals(1, getCalled);
- assertEquals(1, setCalled);
-
- assertEquals(6, super[x] += 5);
- assertEquals(2, getCalled);
- assertEquals(2, setCalled);
-
- super[newProperty] = 15;
- assertEquals(15, this[newProperty]);
- assertEquals(undefined, super[newProperty]);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.testSetterStrict = function() {
- 'use strict';
- getCalled = 0;
- setCalled = 0;
- assertTrue(42 === this);
-
- assertEquals(1, super[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- assertEquals(5, super[x] = 5);
- assertEquals(1, getCalled);
- assertEquals(1, setCalled);
-
- assertEquals(6, super[x] += 5);
- assertEquals(2, getCalled);
- assertEquals(2, setCalled);
-
- var ex;
- try {
+ testSetter() {
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals('object', typeof this);
+ assertInstanceof(this, Number)
+ assertEquals(42, this.valueOf());
+ assertEquals(1, super[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ assertEquals(5, super[x] = 5);
+ assertEquals(1, getCalled);
+ assertEquals(1, setCalled);
+
+ assertEquals(6, super[x] += 5);
+ assertEquals(2, getCalled);
+ assertEquals(2, setCalled);
+
super[newProperty] = 15;
- } catch (e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- }.toMethod(Derived.prototype);
+ assertEquals(15, this[newProperty]);
+ assertEquals(undefined, super[newProperty]);
+ },
+ testSetterStrict() {
+ 'use strict';
+ getCalled = 0;
+ setCalled = 0;
+ assertTrue(42 === this);
+
+ assertEquals(1, super[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ assertEquals(5, super[x] = 5);
+ assertEquals(1, getCalled);
+ assertEquals(1, setCalled);
+
+ assertEquals(6, super[x] += 5);
+ assertEquals(2, getCalled);
+ assertEquals(2, setCalled);
+
+ var ex;
+ try {
+ super[newProperty] = 15;
+ } catch (e) { ex = e; }
+ assertInstanceof(ex,TypeError);
+ }
+ };
Derived.prototype.testSetter.call(42);
Derived.prototype.testSetterStrict.call(42);
function DerivedFromString() {}
- DerivedFromString.prototype = Object.create(String.prototype);
-
- function f() {
- 'use strict';
- assertTrue(42 === this);
- assertEquals(String.prototype.toString, super[toString]);
- var ex;
- try {
- super[toString]();
- } catch(e) { ex = e; }
-
- assertTrue(ex instanceof TypeError);
- }
- f.toMethod(DerivedFromString.prototype).call(42);
+ DerivedFromString.prototype = {
+ __proto__: String.prototype,
+ f() {
+ 'use strict';
+ assertTrue(42 === this);
+ assertEquals(String.prototype.toString, super[toString]);
+ var ex;
+ try {
+ super[toString]();
+ } catch(e) { ex = e; }
+
+ assertInstanceof(ex, TypeError);
+ }
+ };
+ DerivedFromString.prototype.f.call(42);
}());
@@ -750,54 +780,53 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- };
- Derived.prototype.testSetter = function() {
- setCalled = 0;
- getCalled = 0;
- assertEquals('object', typeof this);
- assertTrue(this instanceof Number)
- assertEquals(42, this.valueOf());
- assertEquals(1, super[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- assertEquals(5, super[x] = 5);
- assertEquals(1, getCalled);
- assertEquals(1, setCalled);
-
- assertEquals(6, super[x] += 5);
- assertEquals(2, getCalled);
- assertEquals(2, setCalled);
-
- super[newProperty] = 15;
- assertEquals(15, this[newProperty]);
- assertEquals(undefined, super[newProperty]);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.testSetterStrict = function() {
- 'use strict';
- getCalled = 0;
- setCalled = 0;
- assertTrue(42 === this);
-
- assertEquals(1, super[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- assertEquals(5, super[x] = 5);
- assertEquals(1, getCalled);
- assertEquals(1, setCalled);
-
- assertEquals(6, super[x] += 5);
- assertEquals(2, getCalled);
- assertEquals(2, setCalled);
-
- var ex;
- try {
+ testSetter() {
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals('object', typeof this);
+ assertInstanceof(this, Number)
+ assertEquals(42, this.valueOf());
+ assertEquals(1, super[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ assertEquals(5, super[x] = 5);
+ assertEquals(1, getCalled);
+ assertEquals(1, setCalled);
+
+ assertEquals(6, super[x] += 5);
+ assertEquals(2, getCalled);
+ assertEquals(2, setCalled);
+
super[newProperty] = 15;
- } catch (e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- }.toMethod(Derived.prototype);
+ assertEquals(15, this[newProperty]);
+ assertEquals(undefined, super[newProperty]);
+ },
+ testSetterStrict() {
+ 'use strict';
+ getCalled = 0;
+ setCalled = 0;
+ assertTrue(42 === this);
+
+ assertEquals(1, super[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ assertEquals(5, super[x] = 5);
+ assertEquals(1, getCalled);
+ assertEquals(1, setCalled);
+
+ assertEquals(6, super[x] += 5);
+ assertEquals(2, getCalled);
+ assertEquals(2, setCalled);
+
+ var ex;
+ try {
+ super[newProperty] = 15;
+ } catch (e) { ex = e; }
+ assertInstanceof(ex, TypeError);
+ }
+ };
Derived.prototype.testSetter.call(42);
Derived.prototype.testSetterStrict.call(42);
@@ -807,28 +836,28 @@
(function TestKeyedNumericSetterOnExotics() {
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__: Base.prototype };
-
- Derived.prototype.callSetterOnArray = function() {
- super[42] = 1;
- }.toMethod(Derived.prototype);
-
- Derived.prototype.callStrictSetterOnString = function() {
- 'use strict';
- assertEquals('string', typeof this);
- assertTrue('abcdef' === this);
- var ex = null;
- try {
- super[5] = 'q';
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
-
- ex = null;
- try {
- super[1024] = 'q';
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ callSetterOnArray() {
+ super[42] = 1;
+ },
+ callStrictSetterOnString() {
+ 'use strict';
+ assertEquals('string', typeof this);
+ assertTrue('abcdef' === this);
+ var ex = null;
+ try {
+ super[5] = 'q';
+ } catch(e) { ex = e; }
+ assertInstanceof(ex, TypeError);
+
+ ex = null;
+ try {
+ super[1024] = 'q';
+ } catch(e) { ex = e; }
+ assertInstanceof(ex, TypeError);
+ }
+ };
var x = [];
assertEquals(0, x.length);
@@ -844,23 +873,25 @@
(function TestSetterUndefinedProperties() {
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__ : Base.prototype };
- Derived.prototype.mSloppy = function () {
- assertEquals(undefined, super.x);
- assertEquals(undefined, this.x);
- super.x = 10;
- assertEquals(10, this.x);
- assertEquals(undefined, super.x);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function () {
- 'use strict';
- assertEquals(undefined, super.x);
- assertEquals(undefined, this.x);
- super.x = 10;
- assertEquals(10, this.x);
- assertEquals(undefined, super.x);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mSloppy() {
+ assertEquals(undefined, super.x);
+ assertEquals(undefined, this.x);
+ super.x = 10;
+ assertEquals(10, this.x);
+ assertEquals(undefined, super.x);
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(undefined, super.x);
+ assertEquals(undefined, this.x);
+ super.x = 10;
+ assertEquals(10, this.x);
+ assertEquals(undefined, super.x);
+ }
+ };
+
var d = new Derived();
d.mSloppy();
assertEquals(10, d.x);
@@ -874,23 +905,24 @@
var x = 'x';
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__ : Base.prototype };
- Derived.prototype.mSloppy = function () {
- assertEquals(undefined, super[x]);
- assertEquals(undefined, this[x]);
- super[x] = 10;
- assertEquals(10, this[x]);
- assertEquals(undefined, super[x]);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function () {
- 'use strict';
- assertEquals(undefined, super[x]);
- assertEquals(undefined, this[x]);
- super[x] = 10;
- assertEquals(10, this[x]);
- assertEquals(undefined, super[x]);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mSloppy() {
+ assertEquals(undefined, super[x]);
+ assertEquals(undefined, this[x]);
+ super[x] = 10;
+ assertEquals(10, this[x]);
+ assertEquals(undefined, super[x]);
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(undefined, super[x]);
+ assertEquals(undefined, this[x]);
+ super[x] = 10;
+ assertEquals(10, this[x]);
+ assertEquals(undefined, super[x]);
+ }
+ };
var d = new Derived();
d.mSloppy();
assertEquals(10, d.x);
@@ -904,23 +936,24 @@
var x = 42;
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__ : Base.prototype };
- Derived.prototype.mSloppy = function () {
- assertEquals(undefined, super[x]);
- assertEquals(undefined, this[x]);
- super[x] = 10;
- assertEquals(10, this[x]);
- assertEquals(undefined, super[x]);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function () {
- 'use strict';
- assertEquals(undefined, super[x]);
- assertEquals(undefined, this[x]);
- super[x] = 10;
- assertEquals(10, this[x]);
- assertEquals(undefined, super[x]);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mSloppy() {
+ assertEquals(undefined, super[x]);
+ assertEquals(undefined, this[x]);
+ super[x] = 10;
+ assertEquals(10, this[x]);
+ assertEquals(undefined, super[x]);
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(undefined, super[x]);
+ assertEquals(undefined, this[x]);
+ super[x] = 10;
+ assertEquals(10, this[x]);
+ assertEquals(undefined, super[x]);
+ }
+ };
var d = new Derived();
d.mSloppy();
assertEquals(10, d[x]);
@@ -930,53 +963,180 @@
}());
-(function TestSetterCreatingOwnProperties() {
+(function TestSetterCreatingOwnPropertiesReconfigurable() {
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__ : Base.prototype };
- var setterCalled;
-
- Derived.prototype.mSloppy = function() {
- assertEquals(42, this.ownReadOnly);
- super.ownReadOnly = 55;
- assertEquals(42, this.ownReadOnly);
-
- assertEquals(15, this.ownReadonlyAccessor);
- super.ownReadonlyAccessor = 55;
- assertEquals(15, this.ownReadonlyAccessor);
-
- setterCalled = 0;
- super.ownSetter = 42;
- assertEquals(1, setterCalled);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function() {
- 'use strict';
- assertEquals(42, this.ownReadOnly);
- var ex;
- try {
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mSloppy() {
+ assertEquals(42, this.ownReadOnly);
+ super.ownReadOnly = 55;
+ assertEquals(55, this.ownReadOnly);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownReadOnly');
+ assertEquals(55, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty('ownReadOnly'));
+
+ assertEquals(15, this.ownReadonlyAccessor);
+ super.ownReadonlyAccessor = 25;
+ assertEquals(25, this.ownReadonlyAccessor);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownReadonlyAccessor');
+ assertEquals(25, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty('ownReadonlyAccessor'));
+
+ super.ownSetter = 35;
+ assertEquals(35, this.ownSetter);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownSetter');
+ assertEquals(35, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty('ownSetter'));
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(42, this.ownReadOnly);
super.ownReadOnly = 55;
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(42, this.ownReadOnly);
-
- assertEquals(15, this.ownReadonlyAccessor);
- ex = null;
- try {
- super.ownReadonlyAccessor = 55;
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(15, this.ownReadonlyAccessor);
-
- setterCalled = 0;
- super.ownSetter = 42;
- assertEquals(1, setterCalled);
- }.toMethod(Derived.prototype);
+ assertEquals(55, this.ownReadOnly);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownReadOnly');
+ assertEquals(55, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty('ownReadOnly'));
+
+ assertEquals(15, this.ownReadonlyAccessor);
+ super.ownReadonlyAccessor = 25;
+ assertEquals(25, this.ownReadonlyAccessor);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownReadonlyAccessor');
+ assertEquals(25, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty('ownReadonlyAccessor'));
+
+ super.ownSetter = 35;
+ assertEquals(35, this.ownSetter);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownSetter');
+ assertEquals(35, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty('ownSetter'));
+ },
+ };
+
+ var d = new Derived();
+ Object.defineProperty(d, 'ownReadOnly', {
+ value: 42,
+ writable: false,
+ configurable: true
+ });
+ Object.defineProperty(d, 'ownSetter', {
+ set: function() { assertUnreachable(); },
+ configurable: true
+ });
+ Object.defineProperty(d, 'ownReadonlyAccessor', {
+ get: function() { return 15; },
+ configurable: true
+ });
+
+ d.mSloppy();
+
+ var d = new Derived();
+ Object.defineProperty(d, 'ownReadOnly', {
+ value: 42,
+ writable: false,
+ configurable: true
+ });
+ Object.defineProperty(d, 'ownSetter', {
+ set: function() { assertUnreachable(); },
+ configurable: true
+ });
+ Object.defineProperty(d, 'ownReadonlyAccessor', {
+ get: function() { return 15; },
+ configurable: true
+ });
+ d.mStrict();
+}());
+
+
+(function TestSetterCreatingOwnPropertiesNonConfigurable() {
+ function Base() {}
+ function Derived() {}
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mSloppy() {
+ assertEquals(42, this.ownReadOnly);
+ super.ownReadOnly = 55;
+ assertEquals(42, this.ownReadOnly);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownReadOnly');
+ assertEquals(42, descr.value);
+ assertFalse(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty('ownReadOnly'));
+
+ assertEquals(15, this.ownReadonlyAccessor);
+ super.ownReadonlyAccessor = 25;
+ assertEquals(15, this.ownReadonlyAccessor);
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownReadonlyAccessor');
+ assertFalse(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(Base.prototype.hasOwnProperty('ownReadonlyAccessor'));
+
+ super.ownSetter = 35;
+ var descr = Object.getOwnPropertyDescriptor(this, 'ownSetter');
+ assertFalse(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(Base.prototype.hasOwnProperty('ownSetter'));
+ },
+ mStrict() {
+ 'use strict';
+ var ex;
+ assertEquals(42, this.ownReadOnly);
+ try {
+ super.ownReadOnly = 55;
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals(
+ "Cannot assign to read only property 'ownReadOnly' of #<Base>",
+ ex.message);
+ assertEquals(42, this.ownReadOnly);
+
+ ex = null;
+ assertEquals(15, this.ownReadonlyAccessor);
+ try {
+ super.ownReadonlyAccessor = 25;
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals('Cannot redefine property: ownReadonlyAccessor', ex.message);
+ assertEquals(15, this.ownReadonlyAccessor);
+
+ ex = null;
+ try {
+ super.ownSetter = 35;
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals('Cannot redefine property: ownSetter', ex.message);
+ }
+ };
var d = new Derived();
Object.defineProperty(d, 'ownReadOnly', { value : 42, writable : false });
Object.defineProperty(d, 'ownSetter',
- { set : function() { setterCalled++; } });
+ { set : function() { assertUnreachable(); } });
Object.defineProperty(d, 'ownReadonlyAccessor',
{ get : function() { return 15; }});
d.mSloppy();
@@ -1006,156 +1166,233 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
+ testIter() {
+ setCalled = 0;
+ getCalled = 0;
+ for (super.x in [1,2,3]) {}
+ assertEquals(0, getCalled);
+ assertEquals(3, setCalled);
+ assertEquals(["0", "1", "2"], this.x_);
+ },
+ testIterKeyed() {
+ setCalled = 0;
+ getCalled = 0;
+ for (super[x] in [1,2,3]) {}
+ assertEquals(0, getCalled);
+ assertEquals(3, setCalled);
+ assertEquals(["0","1","2"], this.x_);
+
+ this.x_ = [];
+ setCalled = 0;
+ getCalled = 0;
+ var toStringCalled = 0;
+ var o = {toString: function () { toStringCalled++; return x }};
+ for (super[o] in [1,2,3]) {}
+ assertEquals(0, getCalled);
+ assertEquals(3, setCalled);
+ assertEquals(3, toStringCalled);
+ assertEquals(["0","1","2"], this.x_);
+ }
};
- Derived.prototype.testIter = function() {
- setCalled = 0;
- getCalled = 0;
- for (super.x in [1,2,3]) {}
- assertEquals(0, getCalled);
- assertEquals(3, setCalled);
- assertEquals(["0","1","2"], this.x_);
- }.toMethod(Derived.prototype);
-
new Derived().testIter();
var x = 'x';
- Derived.prototype.testIterKeyed = function() {
- setCalled = 0;
- getCalled = 0;
- for (super[x] in [1,2,3]) {}
- assertEquals(0, getCalled);
- assertEquals(3, setCalled);
- assertEquals(["0","1","2"], this.x_);
-
- this.x_ = [];
- setCalled = 0;
- getCalled = 0;
- var toStringCalled = 0;
- var o = {toString: function () { toStringCalled++; return x }};
- for (super[o] in [1,2,3]) {}
- assertEquals(0, getCalled);
- assertEquals(3, setCalled);
- assertEquals(3, toStringCalled);
- assertEquals(["0","1","2"], this.x_);
- }.toMethod(Derived.prototype);
new Derived().testIterKeyed();
}());
-(function TestKeyedSetterCreatingOwnProperties() {
- var ownReadOnly = 'ownReadOnly';
- var ownReadonlyAccessor = 'ownReadonlyAccessor';
- var ownSetter = 'ownSetter';
+function TestKeyedSetterCreatingOwnPropertiesReconfigurable(ownReadOnly,
+ ownReadonlyAccessor, ownSetter) {
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__ : Base.prototype };
- var setterCalled;
-
- Derived.prototype.mSloppy = function() {
- assertEquals(42, this[ownReadOnly]);
- super[ownReadOnly] = 55;
- assertEquals(42, this[ownReadOnly]);
-
- assertEquals(15, this[ownReadonlyAccessor]);
- super[ownReadonlyAccessor] = 55;
- assertEquals(15, this[ownReadonlyAccessor]);
-
- setterCalled = 0;
- super[ownSetter] = 42;
- assertEquals(1, setterCalled);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function() {
- 'use strict';
- assertEquals(42, this[ownReadOnly]);
- var ex;
- try {
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mSloppy() {
+ assertEquals(42, this[ownReadOnly]);
super[ownReadOnly] = 55;
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(42, this[ownReadOnly]);
-
- assertEquals(15, this[ownReadonlyAccessor]);
- ex = null;
- try {
- super[ownReadonlyAccessor] = 55;
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(15, this[ownReadonlyAccessor]);
-
- setterCalled = 0;
- super[ownSetter] = 42;
- assertEquals(1, setterCalled);
- }.toMethod(Derived.prototype);
+ assertEquals(55, this[ownReadOnly]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownReadOnly);
+ assertEquals(55, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty(ownReadOnly));
+
+ assertEquals(15, this[ownReadonlyAccessor]);
+ super[ownReadonlyAccessor] = 25;
+ assertEquals(25, this[ownReadonlyAccessor]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownReadonlyAccessor);
+ assertEquals(25, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty(ownReadonlyAccessor));
+
+ super[ownSetter] = 35;
+ assertEquals(35, this[ownSetter]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownSetter);
+ assertEquals(35, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty(ownSetter));
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(42, this[ownReadOnly]);
+ super[ownReadOnly] = 55;
+ assertEquals(55, this[ownReadOnly]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownReadOnly);
+ assertEquals(55, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty(ownReadOnly));
+
+ assertEquals(15, this[ownReadonlyAccessor]);
+ super[ownReadonlyAccessor] = 25;
+ assertEquals(25, this[ownReadonlyAccessor]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownReadonlyAccessor);
+ assertEquals(25, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty(ownReadonlyAccessor));
+
+ super[ownSetter] = 35;
+ assertEquals(35, this[ownSetter]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownSetter);
+ assertEquals(35, descr.value);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty(ownSetter));
+ },
+ };
var d = new Derived();
- Object.defineProperty(d, 'ownReadOnly', { value : 42, writable : false });
- Object.defineProperty(d, 'ownSetter',
- { set : function() { setterCalled++; } });
- Object.defineProperty(d, 'ownReadonlyAccessor',
- { get : function() { return 15; }});
+ Object.defineProperty(d, ownReadOnly, {
+ value: 42,
+ writable: false,
+ configurable: true
+ });
+ Object.defineProperty(d, ownSetter, {
+ set: function() { assertUnreachable(); },
+ configurable: true
+ });
+ Object.defineProperty(d, ownReadonlyAccessor, {
+ get: function() { return 15; },
+ configurable: true
+ });
+
d.mSloppy();
+
+ var d = new Derived();
+ Object.defineProperty(d, ownReadOnly, {
+ value: 42,
+ writable: false,
+ configurable: true
+ });
+ Object.defineProperty(d, ownSetter, {
+ set: function() { assertUnreachable(); },
+ configurable: true
+ });
+ Object.defineProperty(d, ownReadonlyAccessor, {
+ get: function() { return 15; },
+ configurable: true
+ });
d.mStrict();
-}());
+}
+TestKeyedSetterCreatingOwnPropertiesReconfigurable('ownReadOnly',
+ 'ownReadonlyAccessor',
+ 'ownSetter');
+TestKeyedSetterCreatingOwnPropertiesReconfigurable(42, 43, 44);
-(function TestKeyedNumericSetterCreatingOwnProperties() {
- var ownReadOnly = 42;
- var ownReadonlyAccessor = 43;
- var ownSetter = 44;
+function TestKeyedSetterCreatingOwnPropertiesNonConfigurable(
+ ownReadOnly, ownReadonlyAccessor, ownSetter) {
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__ : Base.prototype };
- var setterCalled;
-
- Derived.prototype.mSloppy = function() {
- assertEquals(42, this[ownReadOnly]);
- super[ownReadOnly] = 55;
- assertEquals(42, this[ownReadOnly]);
-
- assertEquals(15, this[ownReadonlyAccessor]);
- super[ownReadonlyAccessor] = 55;
- assertEquals(15, this[ownReadonlyAccessor]);
-
- setterCalled = 0;
- super[ownSetter] = 42;
- assertEquals(1, setterCalled);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function() {
- 'use strict';
- assertEquals(42, this[ownReadOnly]);
- var ex;
- try {
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mSloppy() {
+ assertEquals(42, this[ownReadOnly]);
super[ownReadOnly] = 55;
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(42, this[ownReadOnly]);
-
- assertEquals(15, this[ownReadonlyAccessor]);
- ex = null;
- try {
- super[ownReadonlyAccessor] = 55;
- } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(15, this[ownReadonlyAccessor]);
-
- setterCalled = 0;
- super[ownSetter] = 42;
- assertEquals(1, setterCalled);
- }.toMethod(Derived.prototype);
+ assertEquals(42, this[ownReadOnly]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownReadOnly);
+ assertEquals(42, descr.value);
+ assertFalse(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(descr.writable);
+ assertFalse(Base.prototype.hasOwnProperty(ownReadOnly));
+
+ assertEquals(15, this[ownReadonlyAccessor]);
+ super[ownReadonlyAccessor] = 25;
+ assertEquals(15, this[ownReadonlyAccessor]);
+ var descr = Object.getOwnPropertyDescriptor(this, ownReadonlyAccessor);
+ assertFalse(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(Base.prototype.hasOwnProperty(ownReadonlyAccessor));
+
+ super[ownSetter] = 35;
+ var descr = Object.getOwnPropertyDescriptor(this, ownSetter);
+ assertFalse(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertFalse(Base.prototype.hasOwnProperty(ownSetter));
+ },
+ mStrict() {
+ 'use strict';
+ var ex;
+ assertEquals(42, this[ownReadOnly]);
+ try {
+ super[ownReadOnly] = 55;
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals(
+ "Cannot assign to read only property '" + ownReadOnly +
+ "' of #<Base>",
+ ex.message);
+ assertEquals(42, this[ownReadOnly]);
+
+ ex = null;
+ assertEquals(15, this[ownReadonlyAccessor]);
+ try {
+ super[ownReadonlyAccessor] = 25;
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals('Cannot redefine property: ' + ownReadonlyAccessor,
+ ex.message);
+ assertEquals(15, this[ownReadonlyAccessor]);
+
+ ex = null;
+ try {
+ super[ownSetter] = 35;
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals('Cannot redefine property: ' + ownSetter, ex.message);
+ }
+ };
var d = new Derived();
Object.defineProperty(d, ownReadOnly, { value : 42, writable : false });
Object.defineProperty(d, ownSetter,
- { set : function() { setterCalled++; } });
+ { set : function() { assertUnreachable(); } });
Object.defineProperty(d, ownReadonlyAccessor,
{ get : function() { return 15; }});
d.mSloppy();
d.mStrict();
-}());
+}
+TestKeyedSetterCreatingOwnPropertiesNonConfigurable('ownReadOnly',
+ 'ownReadonlyAccessor', 'ownSetter');
+TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
(function TestSetterNoProtoWalk() {
@@ -1164,62 +1401,58 @@
var getCalled;
var setCalled;
Derived.prototype = {
- __proto__ : Base.prototype,
+ __proto__: Base.prototype,
get x() { getCalled++; return 42; },
- set x(v) { setCalled++; }
+ set x(v) { setCalled++; },
+ mSloppy() {
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals(42, this.x);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ this.x = 43;
+ assertEquals(0, getCalled);
+ assertEquals(1, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ super.x = 15;
+ assertEquals(0, setCalled);
+ assertEquals(0, getCalled);
+
+ assertEquals(15, this.x);
+ assertEquals(0, getCalled);
+ assertEquals(0, setCalled);
+ },
+ mStrict() {
+ 'use strict';
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals(42, this.x);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ this.x = 43;
+ assertEquals(0, getCalled);
+ assertEquals(1, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ super.x = 15;
+ assertEquals(0, setCalled);
+ assertEquals(0, getCalled);
+
+ assertEquals(15, this.x);
+ assertEquals(0, getCalled);
+ assertEquals(0, setCalled);
+ }
};
- Derived.prototype.mSloppy = function() {
- setCalled = 0;
- getCalled = 0;
- assertEquals(42, this.x);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- this.x = 43;
- assertEquals(0, getCalled);
- assertEquals(1, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- super.x = 15;
- assertEquals(0, setCalled);
- assertEquals(0, getCalled);
-
- assertEquals(15, this.x);
- assertEquals(0, getCalled);
- assertEquals(0, setCalled);
-
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function() {
- 'use strict';
- setCalled = 0;
- getCalled = 0;
- assertEquals(42, this.x);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- this.x = 43;
- assertEquals(0, getCalled);
- assertEquals(1, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- super.x = 15;
- assertEquals(0, setCalled);
- assertEquals(0, getCalled);
-
- assertEquals(15, this.x);
- assertEquals(0, getCalled);
- assertEquals(0, setCalled);
-
- }.toMethod(Derived.prototype);
-
new Derived().mSloppy();
new Derived().mStrict();
}());
@@ -1232,62 +1465,58 @@
var getCalled;
var setCalled;
Derived.prototype = {
- __proto__ : Base.prototype,
+ __proto__: Base.prototype,
get x() { getCalled++; return 42; },
- set x(v) { setCalled++; }
+ set x(v) { setCalled++; },
+ mSloppy() {
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals(42, this[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ this[x] = 43;
+ assertEquals(0, getCalled);
+ assertEquals(1, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ super[x] = 15;
+ assertEquals(0, setCalled);
+ assertEquals(0, getCalled);
+
+ assertEquals(15, this[x]);
+ assertEquals(0, getCalled);
+ assertEquals(0, setCalled);
+ },
+ mStrict() {
+ 'use strict';
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals(42, this[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ this[x] = 43;
+ assertEquals(0, getCalled);
+ assertEquals(1, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ super[x] = 15;
+ assertEquals(0, setCalled);
+ assertEquals(0, getCalled);
+
+ assertEquals(15, this[x]);
+ assertEquals(0, getCalled);
+ assertEquals(0, setCalled);
+ }
};
- Derived.prototype.mSloppy = function() {
- setCalled = 0;
- getCalled = 0;
- assertEquals(42, this[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- this[x] = 43;
- assertEquals(0, getCalled);
- assertEquals(1, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- super[x] = 15;
- assertEquals(0, setCalled);
- assertEquals(0, getCalled);
-
- assertEquals(15, this[x]);
- assertEquals(0, getCalled);
- assertEquals(0, setCalled);
-
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function() {
- 'use strict';
- setCalled = 0;
- getCalled = 0;
- assertEquals(42, this[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- this[x] = 43;
- assertEquals(0, getCalled);
- assertEquals(1, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- super[x] = 15;
- assertEquals(0, setCalled);
- assertEquals(0, getCalled);
-
- assertEquals(15, this[x]);
- assertEquals(0, getCalled);
- assertEquals(0, setCalled);
-
- }.toMethod(Derived.prototype);
-
new Derived().mSloppy();
new Derived().mStrict();
}());
@@ -1300,7 +1529,54 @@
var getCalled;
var setCalled;
Derived.prototype = {
- __proto__ : Base.prototype,
+ __proto__: Base.prototype,
+ mSloppy() {
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals(42, this[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ this[x] = 43;
+ assertEquals(0, getCalled);
+ assertEquals(1, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ super[x] = 15;
+ assertEquals(0, setCalled);
+ assertEquals(0, getCalled);
+
+ assertEquals(15, this[x]);
+ assertEquals(0, getCalled);
+ assertEquals(0, setCalled);
+ },
+ mStrict() {
+ 'use strict';
+ setCalled = 0;
+ getCalled = 0;
+ assertEquals(42, this[x]);
+ assertEquals(1, getCalled);
+ assertEquals(0, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ this[x] = 43;
+ assertEquals(0, getCalled);
+ assertEquals(1, setCalled);
+
+ getCalled = 0;
+ setCalled = 0;
+ super[x] = 15;
+ assertEquals(0, setCalled);
+ assertEquals(0, getCalled);
+
+ assertEquals(15, this[x]);
+ assertEquals(0, getCalled);
+ assertEquals(0, setCalled);
+ }
};
Object.defineProperty(Derived.prototype, x, {
@@ -1308,57 +1584,6 @@
set: function(v) { setCalled++; }
});
- Derived.prototype.mSloppy = function() {
- setCalled = 0;
- getCalled = 0;
- assertEquals(42, this[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- this[x] = 43;
- assertEquals(0, getCalled);
- assertEquals(1, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- super[x] = 15;
- assertEquals(0, setCalled);
- assertEquals(0, getCalled);
-
- assertEquals(15, this[x]);
- assertEquals(0, getCalled);
- assertEquals(0, setCalled);
-
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mStrict = function() {
- 'use strict';
- setCalled = 0;
- getCalled = 0;
- assertEquals(42, this[x]);
- assertEquals(1, getCalled);
- assertEquals(0, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- this[x] = 43;
- assertEquals(0, getCalled);
- assertEquals(1, setCalled);
-
- getCalled = 0;
- setCalled = 0;
- super[x] = 15;
- assertEquals(0, setCalled);
- assertEquals(0, getCalled);
-
- assertEquals(15, this[x]);
- assertEquals(0, getCalled);
- assertEquals(0, setCalled);
-
- }.toMethod(Derived.prototype);
-
new Derived().mSloppy();
new Derived().mStrict();
}());
@@ -1367,35 +1592,36 @@
(function TestSetterDoesNotReconfigure() {
function Base() {}
function Derived() {}
-
- Derived.prototype.mStrict = function (){
- 'use strict';
- super.nonEnumConfig = 5;
- var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumConfig');
- assertEquals(5, d1.value);
- assertTrue(d1.configurable);
- assertFalse(d1.enumerable);
-
- super.nonEnumNonConfig = 5;
- var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumNonConfig');
- assertEquals(5, d1.value);
- assertFalse(d1.configurable);
- assertFalse(d1.enumerable);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mSloppy = function (){
- super.nonEnumConfig = 42;
- var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumConfig');
- assertEquals(42, d1.value);
- assertTrue(d1.configurable);
- assertFalse(d1.enumerable);
-
- super.nonEnumNonConfig = 42;
- var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumNonConfig');
- assertEquals(42, d1.value);
- assertFalse(d1.configurable);
- assertFalse(d1.enumerable);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Derived.prototype,
+ mStrict(){
+ 'use strict';
+ super.nonEnumConfig = 5;
+ var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumConfig');
+ assertEquals(5, d1.value);
+ assertTrue(d1.configurable);
+ assertFalse(d1.enumerable);
+
+ super.nonEnumNonConfig = 5;
+ var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumNonConfig');
+ assertEquals(5, d1.value);
+ assertFalse(d1.configurable);
+ assertFalse(d1.enumerable);
+ },
+ mSloppy(){
+ super.nonEnumConfig = 42;
+ var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumConfig');
+ assertEquals(42, d1.value);
+ assertTrue(d1.configurable);
+ assertFalse(d1.enumerable);
+
+ super.nonEnumNonConfig = 42;
+ var d1 = Object.getOwnPropertyDescriptor(this, 'nonEnumNonConfig');
+ assertEquals(42, d1.value);
+ assertFalse(d1.configurable);
+ assertFalse(d1.enumerable);
+ }
+ };
var d = new Derived();
Object.defineProperty(d, 'nonEnumConfig',
@@ -1413,36 +1639,36 @@
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__: Base.prototype };
-
- Derived.prototype.mStrict = function (){
- 'use strict';
- super[nonEnumConfig] = 5;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
- assertEquals(5, d1.value);
- assertTrue(d1.configurable);
- assertFalse(d1.enumerable);
-
- super[nonEnumNonConfig] = 5;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
- assertEquals(5, d1.value);
- assertFalse(d1.configurable);
- assertFalse(d1.enumerable);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mSloppy = function (){
- super[nonEnumConfig] = 42;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
- assertEquals(42, d1.value);
- assertTrue(d1.configurable);
- assertFalse(d1.enumerable);
-
- super[nonEnumNonConfig] = 42;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
- assertEquals(42, d1.value);
- assertFalse(d1.configurable);
- assertFalse(d1.enumerable);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mStrict(){
+ 'use strict';
+ super[nonEnumConfig] = 5;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
+ assertEquals(5, d1.value);
+ assertTrue(d1.configurable);
+ assertFalse(d1.enumerable);
+
+ super[nonEnumNonConfig] = 5;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
+ assertEquals(5, d1.value);
+ assertFalse(d1.configurable);
+ assertFalse(d1.enumerable);
+ },
+ mSloppy(){
+ super[nonEnumConfig] = 42;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
+ assertEquals(42, d1.value);
+ assertTrue(d1.configurable);
+ assertFalse(d1.enumerable);
+
+ super[nonEnumNonConfig] = 42;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
+ assertEquals(42, d1.value);
+ assertFalse(d1.configurable);
+ assertFalse(d1.enumerable);
+ }
+ };
var d = new Derived();
Object.defineProperty(d, nonEnumConfig,
@@ -1460,36 +1686,36 @@
function Base() {}
function Derived() {}
- Derived.prototype = { __proto__: Base.prototype };
-
- Derived.prototype.mStrict = function (){
- 'use strict';
- super[nonEnumConfig] = 5;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
- assertEquals(5, d1.value);
- assertTrue(d1.configurable);
- assertFalse(d1.enumerable);
-
- super[nonEnumNonConfig] = 5;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
- assertEquals(5, d1.value);
- assertFalse(d1.configurable);
- assertFalse(d1.enumerable);
- }.toMethod(Derived.prototype);
-
- Derived.prototype.mSloppy = function (){
- super[nonEnumConfig] = 42;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
- assertEquals(42, d1.value);
- assertTrue(d1.configurable);
- assertFalse(d1.enumerable);
-
- super[nonEnumNonConfig] = 42;
- var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
- assertEquals(42, d1.value);
- assertFalse(d1.configurable);
- assertFalse(d1.enumerable);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ mStrict(){
+ 'use strict';
+ super[nonEnumConfig] = 5;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
+ assertEquals(5, d1.value);
+ assertTrue(d1.configurable);
+ assertFalse(d1.enumerable);
+
+ super[nonEnumNonConfig] = 5;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
+ assertEquals(5, d1.value);
+ assertFalse(d1.configurable);
+ assertFalse(d1.enumerable);
+ },
+ mSloppy(){
+ super[nonEnumConfig] = 42;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumConfig);
+ assertEquals(42, d1.value);
+ assertTrue(d1.configurable);
+ assertFalse(d1.enumerable);
+
+ super[nonEnumNonConfig] = 42;
+ var d1 = Object.getOwnPropertyDescriptor(this, nonEnumNonConfig);
+ assertEquals(42, d1.value);
+ assertFalse(d1.configurable);
+ assertFalse(d1.enumerable);
+ }
+ };
var d = new Derived();
Object.defineProperty(d, nonEnumConfig,
@@ -1519,32 +1745,31 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 2
+ _x: 2,
+ testCounts() {
+ assertEquals(2, this._x);
+ assertEquals(2, super.x);
+ super.x++;
+ assertEquals(3, super.x);
+ ++super.x;
+ assertEquals(4, super.x);
+ assertEquals(4, super.x++);
+ assertEquals(5, super.x);
+ assertEquals(6, ++super.x);
+ assertEquals(6, super.x);
+ assertEquals(6, this._x);
+
+ super.x--;
+ assertEquals(5, super.x);
+ --super.x;
+ assertEquals(4, super.x);
+ assertEquals(4, super.x--);
+ assertEquals(3, super.x);
+ assertEquals(2, --super.x);
+ assertEquals(2, super.x);
+ assertEquals(2, this._x);
+ }
};
-
- Derived.prototype.testCounts = function() {
- assertEquals(2, this._x);
- assertEquals(2, super.x);
- super.x++;
- assertEquals(3, super.x);
- ++super.x;
- assertEquals(4, super.x);
- assertEquals(4, super.x++);
- assertEquals(5, super.x);
- assertEquals(6, ++super.x);
- assertEquals(6, super.x);
- assertEquals(6, this._x);
-
- super.x--;
- assertEquals(5, super.x);
- --super.x;
- assertEquals(4, super.x);
- assertEquals(4, super.x--);
- assertEquals(3, super.x);
- assertEquals(2, --super.x);
- assertEquals(2, super.x);
- assertEquals(2, this._x);
- }.toMethod(Derived.prototype);
new Derived().testCounts();
}());
@@ -1568,32 +1793,31 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 2
+ _x: 2,
+ testCounts() {
+ assertEquals(2, this._x);
+ assertEquals(2, super[x]);
+ super[x]++;
+ assertEquals(3, super[x]);
+ ++super[x];
+ assertEquals(4, super[x]);
+ assertEquals(4, super[x]++);
+ assertEquals(5, super[x]);
+ assertEquals(6, ++super[x]);
+ assertEquals(6, super[x]);
+ assertEquals(6, this._x);
+
+ super[x]--;
+ assertEquals(5, super[x]);
+ --super[x];
+ assertEquals(4, super[x]);
+ assertEquals(4, super[x]--);
+ assertEquals(3, super[x]);
+ assertEquals(2, --super[x]);
+ assertEquals(2, super[x]);
+ assertEquals(2, this._x);
+ }
};
-
- Derived.prototype.testCounts = function() {
- assertEquals(2, this._x);
- assertEquals(2, super[x]);
- super[x]++;
- assertEquals(3, super[x]);
- ++super[x];
- assertEquals(4, super[x]);
- assertEquals(4, super[x]++);
- assertEquals(5, super[x]);
- assertEquals(6, ++super[x]);
- assertEquals(6, super[x]);
- assertEquals(6, this._x);
-
- super[x]--;
- assertEquals(5, super[x]);
- --super[x];
- assertEquals(4, super[x]);
- assertEquals(4, super[x]--);
- assertEquals(3, super[x]);
- assertEquals(2, --super[x]);
- assertEquals(2, super[x]);
- assertEquals(2, this._x);
- }.toMethod(Derived.prototype);
new Derived().testCounts();
}());
@@ -1616,32 +1840,31 @@
Derived.prototype = {
__proto__: Base.prototype,
constructor: Derived,
- _x: 2
+ _x: 2,
+ testCounts() {
+ assertEquals(2, this._x);
+ assertEquals(2, super[x]);
+ super[x]++;
+ assertEquals(3, super[x]);
+ ++super[x];
+ assertEquals(4, super[x]);
+ assertEquals(4, super[x]++);
+ assertEquals(5, super[x]);
+ assertEquals(6, ++super[x]);
+ assertEquals(6, super[x]);
+ assertEquals(6, this._x);
+
+ super[x]--;
+ assertEquals(5, super[x]);
+ --super[x];
+ assertEquals(4, super[x]);
+ assertEquals(4, super[x]--);
+ assertEquals(3, super[x]);
+ assertEquals(2, --super[x]);
+ assertEquals(2, super[x]);
+ assertEquals(2, this._x);
+ }
};
-
- Derived.prototype.testCounts = function() {
- assertEquals(2, this._x);
- assertEquals(2, super[x]);
- super[x]++;
- assertEquals(3, super[x]);
- ++super[x];
- assertEquals(4, super[x]);
- assertEquals(4, super[x]++);
- assertEquals(5, super[x]);
- assertEquals(6, ++super[x]);
- assertEquals(6, super[x]);
- assertEquals(6, this._x);
-
- super[x]--;
- assertEquals(5, super[x]);
- --super[x];
- assertEquals(4, super[x]);
- assertEquals(4, super[x]--);
- assertEquals(3, super[x]);
- assertEquals(2, --super[x]);
- assertEquals(2, super[x]);
- assertEquals(2, this._x);
- }.toMethod(Derived.prototype);
new Derived().testCounts();
}());
@@ -1650,26 +1873,27 @@
function Base() {}
Object.defineProperty(Base.prototype, 'x', { value : 27, writable: false });
function Derived() {}
-
- Derived.prototype = { __proto__: Base.prototype, constructor: Derived };
-
- Derived.prototype.mSloppy = function() {
- assertEquals(27, super.x);
- assertEquals(27, this.x);
- super.x = 10;
- assertEquals(27, super.x);
- assertEquals(27, this.x);
- }.toMethod(Derived.prototype);
- Derived.prototype.mStrict = function() {
- 'use strict';
- assertEquals(27, super.x);
- assertEquals(27, this.x);
- var ex = null;
- try { super.x = 10; } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(27, super.x);
- assertEquals(27, this.x);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ constructor: Derived,
+ mSloppy() {
+ assertEquals(27, super.x);
+ assertEquals(27, this.x);
+ super.x = 10;
+ assertEquals(27, super.x);
+ assertEquals(27, this.x);
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(27, super.x);
+ assertEquals(27, this.x);
+ var ex = null;
+ try { super.x = 10; } catch(e) { ex = e; }
+ assertInstanceof(ex, TypeError);
+ assertEquals(27, super.x);
+ assertEquals(27, this.x);
+ }
+ };
new Derived().mSloppy();
new Derived().mStrict();
}());
@@ -1681,25 +1905,27 @@
Object.defineProperty(Base.prototype, x, { value : 27, writable: false });
function Derived() {}
- Derived.prototype = { __proto__: Base.prototype, constructor: Derived };
-
- Derived.prototype.mSloppy = function() {
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- super[x] = 10;
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- }.toMethod(Derived.prototype);
- Derived.prototype.mStrict = function() {
- 'use strict';
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- var ex = null;
- try { super[x] = 10; } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ constructor: Derived,
+ mSloppy() {
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ super[x] = 10;
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ var ex = null;
+ try { super[x] = 10; } catch(e) { ex = e; }
+ assertInstanceof(ex, TypeError);
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ }
+ };
new Derived().mSloppy();
new Derived().mStrict();
}());
@@ -1711,55 +1937,51 @@
Object.defineProperty(Base.prototype, x, { value : 27, writable: false });
function Derived() {}
- Derived.prototype = { __proto__: Base.prototype, constructor: Derived };
-
- Derived.prototype.mSloppy = function() {
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- super[x] = 10;
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- }.toMethod(Derived.prototype);
- Derived.prototype.mStrict = function() {
- 'use strict';
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- var ex = null;
- try { super[x] = 10; } catch(e) { ex = e; }
- assertTrue(ex instanceof TypeError);
- assertEquals(27, super[x]);
- assertEquals(27, this[x]);
- }.toMethod(Derived.prototype);
+ Derived.prototype = {
+ __proto__: Base.prototype,
+ constructor: Derived,
+ mSloppy() {
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ super[x] = 10;
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ },
+ mStrict() {
+ 'use strict';
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ var ex = null;
+ try { super[x] = 10; } catch(e) { ex = e; }
+ assertInstanceof(ex, TypeError);
+ assertEquals(27, super[x]);
+ assertEquals(27, this[x]);
+ }
+ };
new Derived().mSloppy();
new Derived().mStrict();
}());
-function Subclass(base, constructor) {
- var homeObject = {
- __proto__: base.prototype,
- constructor: constructor
- };
- constructor.__proto__ = base;
- constructor.prototype = homeObject;
- // not doing toMethod: home object is not required for
- // super constructor calls.
- return constructor;
-}
-
(function TestSuperCall() {
+ 'use strict';
+
var baseCalled = 0;
var derivedCalled = 0;
var derivedDerivedCalled = 0;
- function Base() {
- baseCalled++;
+ class Base {
+ constructor() {
+ baseCalled++;
+ }
}
- var Derived = Subclass(Base, function () {
- super();
- derivedCalled++;
- });
+ class Derived extends Base {
+ constructor() {
+ super();
+ derivedCalled++;
+ }
+ }
assertEquals(Base, Base.prototype.constructor);
assertEquals(Base.prototype, Derived.prototype.__proto__);
@@ -1770,10 +1992,12 @@ function Subclass(base, constructor) {
assertEquals(1, baseCalled);
assertEquals(1, derivedCalled);
- var DerivedDerived = Subclass(Derived, function () {
- super();
- derivedDerivedCalled++;
- });
+ class DerivedDerived extends Derived {
+ constructor() {
+ super();
+ derivedDerivedCalled++;
+ }
+ }
baseCalled = 0;
derivedCalled = 0;
@@ -1783,32 +2007,33 @@ function Subclass(base, constructor) {
assertEquals(1, derivedCalled);
assertEquals(1, derivedDerivedCalled);
- function Base2(v) {
- this.fromBase = v;
+ class Base2 {
+ constructor(v) {
+ this.fromBase = v;
+ }
+ }
+ class Derived2 extends Base2 {
+ constructor(v1, v2) {
+ super(v1);
+ this.fromDerived = v2;
+ }
}
- var Derived2 = Subclass(Base2, function (v1, v2) {
- super(v1);
- this.fromDerived = v2;
- });
var d = new Derived2("base", "derived");
assertEquals("base", d.fromBase);
assertEquals("derived", d.fromDerived);
- function ImplicitSubclassOfFunction() {
- super();
- this.x = 123;
- }
-
- var o = new ImplicitSubclassOfFunction();
- assertEquals(123, o.x);
-
var calls = 0;
- function G() {
- calls++;
+ class G {
+ constructor() {
+ calls++;
+ }
}
- function F() {
- super();
+
+ class F extends Object {
+ constructor() {
+ super();
+ }
}
F.__proto__ = G;
new F();
@@ -1819,103 +2044,23 @@ function Subclass(base, constructor) {
}());
-(function TestNewSuper() {
- var baseCalled = 0;
- var derivedCalled = 0;
-
- function Base() {
- baseCalled++;
- this.x = 15;
- }
-
-
- var Derived = Subclass(Base, function() {
- baseCalled = 0;
- var b = new super();
- assertEquals(1, baseCalled)
- assertEquals(Base.prototype, b.__proto__);
- assertEquals(15, b.x);
- assertEquals(undefined, this.x);
- derivedCalled++;
- });
+(function TestExtendsObject() {
+ 'use strict';
+ class F extends Object { }
+ var f = new F(42);
- derivedCalled = 0;
- new Derived();
- assertEquals(1, derivedCalled);
+ // TODO(dslomov,arv): Fix this. BUG=v8:3886.
+ assertInstanceof(f, Number);
}());
-
(function TestSuperCallErrorCases() {
- function T() {
- super();
+ 'use strict';
+ class T extends Object {
+ constructor() {
+ super();
+ }
}
+
T.__proto__ = null;
- // Spec says ReferenceError here, but for other IsCallable failures
- // we throw TypeError.
- // Filed https://bugs.ecmascript.org/show_bug.cgi?id=3282
assertThrows(function() { new T(); }, TypeError);
-
- function T1() {
- var b = new super();
- }
- T1.__proto = null;
- assertThrows(function() { new T1(); }, TypeError);
-}());
-
-
-(function TestSuperCallSyntacticRestriction() {
- assertThrows(function() {
- function C() {
- var y;
- super();
- }
- new C();
- }, TypeError);
- assertThrows(function() {
- function C() {
- super(this.x);
- }
- new C();
- }, TypeError);
- assertThrows(function() {
- function C() {
- super(this);
- }
- new C();
- }, TypeError);
- assertThrows(function() {
- function C() {
- super(1, 2, Object.getPrototypeOf(this));
- }
- new C();
- }, TypeError);
- assertThrows(function() {
- function C() {
- { super(1, 2); }
- }; new C();
- }, TypeError);
- assertThrows(function() {
- function C() {
- if (1) super();
- }; new C();
- }, TypeError);
-
- function C1() {
- 'use strict';
- super();
- };
- new C1();
-
- function C2() {
- ; 'use strict';;;;;
- super();
- };
- new C2();
-
- function C3() {
- ; 'use strict';;;;;
- // This is a comment.
- super();
- }
- new C3();
}());
diff --git a/deps/v8/test/mjsunit/harmony/templates.js b/deps/v8/test/mjsunit/harmony/templates.js
index c339bb8cc7..a884f58fb6 100644
--- a/deps/v8/test/mjsunit/harmony/templates.js
+++ b/deps/v8/test/mjsunit/harmony/templates.js
@@ -505,3 +505,16 @@ var obj = {
assertEquals("\u00008", `\08`);
assertEquals("\u00009", `\09`);
})();
+
+
+(function testLegacyOctalEscapesInExpressions() {
+ // Allowed in sloppy expression
+ assertEquals("\x07", `${"\07"}`);
+
+ // Disallowed in template tail
+ assertThrows("`${\"\\07\"}\\07`", SyntaxError);
+
+ // Disallowed in strict expression
+ assertThrows("`${(function() { \"use strict\"; return \"\\07\"; })()}`",
+ SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/toMethod.js b/deps/v8/test/mjsunit/harmony/toMethod.js
index ad51b2ff38..81db5830c2 100644
--- a/deps/v8/test/mjsunit/harmony/toMethod.js
+++ b/deps/v8/test/mjsunit/harmony/toMethod.js
@@ -14,7 +14,7 @@
function ClassD() { }
assertEquals(1, f(1));
- var g = f.toMethod(ClassD.prototype);
+ var g = %ToMethod(f, ClassD.prototype);
assertEquals(1, g(1));
assertEquals(undefined, f[%HomeObjectSymbol()]);
assertEquals(ClassD.prototype, g[%HomeObjectSymbol()]);
@@ -33,22 +33,13 @@
var q = f(0);
assertEquals(2, q(1));
assertEquals(3, q(1));
- var g = q.toMethod(Derived.prototype);
+ var g = %ToMethod(q, Derived.prototype);
assertFalse(g === q);
assertEquals(4, g(1));
assertEquals(5, q(1));
}());
-(function TestErrorCases() {
- var sFun = Function.prototype.toMethod;
- assertThrows(function() { sFun.call({}); }, TypeError);
- assertThrows(function() { sFun.call({}, {}); }, TypeError);
- function f(){};
- assertThrows(function() { f.toMethod(1); }, TypeError);
-}());
-
-
(function TestPrototypeChain() {
var o = {};
var o1 = {};
@@ -56,11 +47,11 @@
function g() { }
- var fMeth = f.toMethod(o);
+ var fMeth = %ToMethod(f, o);
assertEquals(o, fMeth[%HomeObjectSymbol()]);
g.__proto__ = fMeth;
assertEquals(undefined, g[%HomeObjectSymbol()]);
- var gMeth = g.toMethod(o1);
+ var gMeth = %ToMethod(g, o1);
assertEquals(fMeth, gMeth.__proto__);
assertEquals(o, fMeth[%HomeObjectSymbol()]);
assertEquals(o1, gMeth[%HomeObjectSymbol()]);
@@ -82,7 +73,7 @@
}
var fBound = f.bind(o, 1, 2, 3);
- var fMeth = fBound.toMethod(p);
+ var fMeth = %ToMethod(fBound, p);
assertEquals(10, fMeth(4));
assertEquals(10, fMeth.call(p, 4));
var fBound1 = fBound.bind(o, 4);
@@ -100,7 +91,7 @@
assertEquals(15, f(o));
%OptimizeFunctionOnNextCall(f);
assertEquals(15, f(o));
- var g = f.toMethod({});
+ var g = %ToMethod(f, {});
var o1 = {y : 1024, x : "abc"};
assertEquals("abc", f(o1));
assertEquals("abc", g(o1));
@@ -110,6 +101,6 @@
function f() {}
Object.preventExtensions(f);
assertFalse(Object.isExtensible(f));
- var m = f.toMethod({});
+ var m = %ToMethod(f, {});
assertTrue(Object.isExtensible(m));
}());
diff --git a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
new file mode 100644
index 0000000000..67493351a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
@@ -0,0 +1,212 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ES6 extends the \uxxxx escape and also allows \u{xxxxx}.
+
+// Flags: --harmony-unicode-regexps --harmony-regexps
+
+function testRegexpHelper(r) {
+ assertTrue(r.test("foo"));
+ assertTrue(r.test("boo"));
+ assertFalse(r.test("moo"));
+}
+
+
+(function TestUnicodeEscapes() {
+ testRegexpHelper(/(\u0066|\u0062)oo/);
+ testRegexpHelper(/(\u0066|\u0062)oo/u);
+ testRegexpHelper(/(\u{0066}|\u{0062})oo/u);
+ testRegexpHelper(/(\u{66}|\u{000062})oo/u);
+
+ // Note that we need \\ inside a string, otherwise it's interpreted as a
+ // unicode escape inside a string.
+ testRegexpHelper(new RegExp("(\\u0066|\\u0062)oo"));
+ testRegexpHelper(new RegExp("(\\u0066|\\u0062)oo", "u"));
+ testRegexpHelper(new RegExp("(\\u{0066}|\\u{0062})oo", "u"));
+ testRegexpHelper(new RegExp("(\\u{66}|\\u{000062})oo", "u"));
+
+ // Though, unicode escapes via strings should work too.
+ testRegexpHelper(new RegExp("(\u0066|\u0062)oo"));
+ testRegexpHelper(new RegExp("(\u0066|\u0062)oo", "u"));
+ testRegexpHelper(new RegExp("(\u{0066}|\u{0062})oo", "u"));
+ testRegexpHelper(new RegExp("(\u{66}|\u{000062})oo", "u"));
+})();
+
+
+(function TestUnicodeEscapesInCharacterClasses() {
+ testRegexpHelper(/[\u0062-\u0066]oo/);
+ testRegexpHelper(/[\u0062-\u0066]oo/u);
+ testRegexpHelper(/[\u{0062}-\u{0066}]oo/u);
+ testRegexpHelper(/[\u{62}-\u{00000066}]oo/u);
+
+ // Note that we need \\ inside a string, otherwise it's interpreted as a
+ // unicode escape inside a string.
+ testRegexpHelper(new RegExp("[\\u0062-\\u0066]oo"));
+ testRegexpHelper(new RegExp("[\\u0062-\\u0066]oo", "u"));
+ testRegexpHelper(new RegExp("[\\u{0062}-\\u{0066}]oo", "u"));
+ testRegexpHelper(new RegExp("[\\u{62}-\\u{00000066}]oo", "u"));
+
+ // Though, unicode escapes via strings should work too.
+ testRegexpHelper(new RegExp("[\u0062-\u0066]oo"));
+ testRegexpHelper(new RegExp("[\u0062-\u0066]oo", "u"));
+ testRegexpHelper(new RegExp("[\u{0062}-\u{0066}]oo", "u"));
+ testRegexpHelper(new RegExp("[\u{62}-\u{00000066}]oo", "u"));
+})();
+
+
+(function TestBraceEscapesWithoutUnicodeFlag() {
+ // \u followed by illegal escape will be parsed as u. {x} will be the
+ // character count.
+ function helper1(r) {
+ assertFalse(r.test("fbar"));
+ assertFalse(r.test("fubar"));
+ assertTrue(r.test("fuubar"));
+ assertFalse(r.test("fuuubar"));
+ }
+ helper1(/f\u{2}bar/);
+ helper1(new RegExp("f\\u{2}bar"));
+
+ function helper2(r) {
+ assertFalse(r.test("fbar"));
+ assertTrue(r.test("fubar"));
+ assertTrue(r.test("fuubar"));
+ assertFalse(r.test("fuuubar"));
+ }
+
+ helper2(/f\u{1,2}bar/);
+ helper2(new RegExp("f\\u{1,2}bar"));
+
+ function helper3(r) {
+ assertTrue(r.test("u"));
+ assertTrue(r.test("{"));
+ assertTrue(r.test("2"));
+ assertTrue(r.test("}"));
+ assertFalse(r.test("q"));
+ assertFalse(r.test("("));
+ assertFalse(r.test(")"));
+ }
+ helper3(/[\u{2}]/);
+ helper3(new RegExp("[\\u{2}]"));
+})();
+
+
+(function TestInvalidEscapes() {
+ // Without the u flag, invalid unicode escapes and other invalid escapes are
+ // treated as identity escapes.
+ function helper1(r) {
+ assertTrue(r.test("firstuxz89second"));
+ }
+ helper1(/first\u\x\z\8\9second/);
+ helper1(new RegExp("first\\u\\x\\z\\8\\9second"));
+
+ function helper2(r) {
+ assertTrue(r.test("u"));
+ assertTrue(r.test("x"));
+ assertTrue(r.test("z"));
+ assertTrue(r.test("8"));
+ assertTrue(r.test("9"));
+ assertFalse(r.test("q"));
+ assertFalse(r.test("7"));
+ }
+ helper2(/[\u\x\z\8\9]/);
+ helper2(new RegExp("[\\u\\x\\z\\8\\9]"));
+
+ // However, with the u flag, these are treated as invalid escapes.
+ assertThrows("/\\u/u", SyntaxError);
+ assertThrows("/\\u12/u", SyntaxError);
+ assertThrows("/\\ufoo/u", SyntaxError);
+ assertThrows("/\\x/u", SyntaxError);
+ assertThrows("/\\xfoo/u", SyntaxError);
+ assertThrows("/\\z/u", SyntaxError);
+ assertThrows("/\\8/u", SyntaxError);
+ assertThrows("/\\9/u", SyntaxError);
+
+ assertThrows("new RegExp('\\\\u', 'u')", SyntaxError);
+ assertThrows("new RegExp('\\\\u12', 'u')", SyntaxError);
+ assertThrows("new RegExp('\\\\ufoo', 'u')", SyntaxError);
+ assertThrows("new RegExp('\\\\x', 'u')", SyntaxError);
+ assertThrows("new RegExp('\\\\xfoo', 'u')", SyntaxError);
+ assertThrows("new RegExp('\\\\z', 'u')", SyntaxError);
+ assertThrows("new RegExp('\\\\8', 'u')", SyntaxError);
+ assertThrows("new RegExp('\\\\9', 'u')", SyntaxError);
+})();
+
+
+(function TestTooBigHexEscape() {
+ // The hex number inside \u{} has a maximum value.
+ /\u{10ffff}/u
+ new RegExp("\\u{10ffff}", "u")
+ assertThrows("/\\u{110000}/u", SyntaxError);
+ assertThrows("new RegExp('\\\\u{110000}', 'u')", SyntaxError);
+
+ // Without the u flag, they're of course fine ({x} is the count).
+ /\u{110000}/
+ new RegExp("\\u{110000}")
+})();
+
+
+(function TestSyntaxEscapes() {
+ // Syntax escapes work the same with or without the u flag.
+ function helper(r) {
+ assertTrue(r.test("foo[bar"));
+ assertFalse(r.test("foo]bar"));
+ }
+ helper(/foo\[bar/);
+ helper(new RegExp("foo\\[bar"));
+ helper(/foo\[bar/u);
+ helper(new RegExp("foo\\[bar", "u"));
+})();
+
+
+(function TestUnicodeSurrogates() {
+ // U+10E6D corresponds to the surrogate pair [U+D803, U+DE6D].
+ function helper(r) {
+ assertTrue(r.test("foo\u{10e6d}bar"));
+ }
+ helper(/foo\ud803\ude6dbar/u);
+ helper(new RegExp("foo\\ud803\\ude6dbar", "u"));
+})();
+
+
+(function AllFlags() {
+ // Test that we can pass all possible regexp flags and they work properly.
+ function helper1(r) {
+ assertTrue(r.global);
+ assertTrue(r.ignoreCase);
+ assertTrue(r.multiline);
+ assertTrue(r.sticky);
+ assertTrue(r.unicode);
+ }
+
+ helper1(/foo/gimyu);
+ helper1(new RegExp("foo", "gimyu"));
+
+ function helper2(r) {
+ assertFalse(r.global);
+ assertFalse(r.ignoreCase);
+ assertFalse(r.multiline);
+ assertFalse(r.sticky);
+ assertFalse(r.unicode);
+ }
+
+ helper2(/foo/);
+ helper2(new RegExp("foo"));
+})();
+
+
+(function DuplicatedFlags() {
+ // Test that duplicating the u flag is not allowed.
+ assertThrows("/foo/ugu");
+ assertThrows("new RegExp('foo', 'ugu')");
+})();
+
+
+(function ToString() {
+ // Test that the u flag is included in the string representation of regexps.
+ function helper(r) {
+ assertEquals(r.toString(), "/foo/u");
+ }
+ helper(/foo/u);
+ helper(new RegExp("foo", "u"));
+})();
diff --git a/deps/v8/test/mjsunit/mirror-object.js b/deps/v8/test/mjsunit/mirror-object.js
index 91d0f82fe4..834d7a580a 100644
--- a/deps/v8/test/mjsunit/mirror-object.js
+++ b/deps/v8/test/mjsunit/mirror-object.js
@@ -209,14 +209,14 @@ mirror = debug.MakeMirror(o);
// a has getter but no setter.
assertTrue(mirror.property('a').hasGetter());
assertFalse(mirror.property('a').hasSetter());
-assertEquals(debug.PropertyType.Callbacks, mirror.property('a').propertyType());
+assertEquals(debug.PropertyType.AccessorConstant, mirror.property('a').propertyType());
assertEquals('function', mirror.property('a').getter().type());
assertEquals('undefined', mirror.property('a').setter().type());
assertEquals('function (){return \'a\';}', mirror.property('a').getter().source());
// b has setter but no getter.
assertFalse(mirror.property('b').hasGetter());
assertTrue(mirror.property('b').hasSetter());
-assertEquals(debug.PropertyType.Callbacks, mirror.property('b').propertyType());
+assertEquals(debug.PropertyType.AccessorConstant, mirror.property('b').propertyType());
assertEquals('undefined', mirror.property('b').getter().type());
assertEquals('function', mirror.property('b').setter().type());
assertEquals('function (){}', mirror.property('b').setter().source());
@@ -224,7 +224,7 @@ assertFalse(mirror.property('b').isException());
// c has both getter and setter. The getter throws an exception.
assertTrue(mirror.property('c').hasGetter());
assertTrue(mirror.property('c').hasSetter());
-assertEquals(debug.PropertyType.Callbacks, mirror.property('c').propertyType());
+assertEquals(debug.PropertyType.AccessorConstant, mirror.property('c').propertyType());
assertEquals('function', mirror.property('c').getter().type());
assertEquals('function', mirror.property('c').setter().type());
assertEquals('function (){throw \'c\';}', mirror.property('c').getter().source());
diff --git a/deps/v8/test/mjsunit/mirror-regexp.js b/deps/v8/test/mjsunit/mirror-regexp.js
index d6a9d71024..6c251d4ff6 100644
--- a/deps/v8/test/mjsunit/mirror-regexp.js
+++ b/deps/v8/test/mjsunit/mirror-regexp.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --harmony-unicode-regexps
// Test the mirror object for regular expression values
var all_attributes = debug.PropertyAttribute.ReadOnly |
@@ -36,6 +36,7 @@ var expected_attributes = {
'global': all_attributes,
'ignoreCase': all_attributes,
'multiline': all_attributes,
+ 'unicode' : all_attributes,
'lastIndex': debug.PropertyAttribute.DontEnum | debug.PropertyAttribute.DontDelete
};
@@ -108,3 +109,4 @@ testRegExpMirror(/x/);
testRegExpMirror(/[abc]/);
testRegExpMirror(/[\r\n]/g);
testRegExpMirror(/a*b/gmi);
+testRegExpMirror(/(\u{0066}|\u{0062})oo/u);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index b360425c61..f50319d6aa 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -202,8 +202,8 @@ var assertUnoptimized;
if (a === 0) return (1 / a) === (1 / b);
return true;
}
- if (typeof a != typeof b) return false;
- if (typeof a == "number") return isNaN(a) && isNaN(b);
+ if (typeof a !== typeof b) return false;
+ if (typeof a === "number") return isNaN(a) && isNaN(b);
if (typeof a !== "object" && typeof a !== "function") return false;
// Neither a nor b is primitive.
var objectClass = classOf(a);
@@ -216,7 +216,7 @@ var assertUnoptimized;
if (objectClass === "Function") return false;
if (objectClass === "Array") {
var elementCount = 0;
- if (a.length != b.length) {
+ if (a.length !== b.length) {
return false;
}
for (var i = 0; i < a.length; i++) {
@@ -224,8 +224,8 @@ var assertUnoptimized;
}
return true;
}
- if (objectClass == "String" || objectClass == "Number" ||
- objectClass == "Boolean" || objectClass == "Date") {
+ if (objectClass === "String" || objectClass === "Number" ||
+ objectClass === "Boolean" || objectClass === "Date") {
if (a.valueOf() !== b.valueOf()) return false;
}
return deepObjectEquals(a, b);
@@ -235,7 +235,7 @@ var assertUnoptimized;
// TODO(mstarzinger): We should think about using Harmony's egal operator
// or the function equivalent Object.is() here.
if (found === expected) {
- if (expected !== 0 || (1 / expected) == (1 / found)) return;
+ if (expected !== 0 || (1 / expected) === (1 / found)) return;
} else if ((expected !== expected) && (found !== found)) {
return;
}
@@ -262,7 +262,7 @@ var assertUnoptimized;
start = name_opt + " - ";
}
assertEquals(expected.length, found.length, start + "array length");
- if (expected.length == found.length) {
+ if (expected.length === found.length) {
for (var i = 0; i < expected.length; ++i) {
assertEquals(expected[i], found[i],
start + "array element at index " + i);
@@ -282,7 +282,7 @@ var assertUnoptimized;
assertToStringEquals = function assertToStringEquals(expected, found,
name_opt) {
- if (expected != String(found)) {
+ if (expected !== String(found)) {
fail(expected, found, name_opt);
}
};
@@ -315,14 +315,14 @@ var assertUnoptimized;
assertThrows = function assertThrows(code, type_opt, cause_opt) {
var threwException = true;
try {
- if (typeof code == 'function') {
+ if (typeof code === 'function') {
code();
} else {
eval(code);
}
threwException = false;
} catch (e) {
- if (typeof type_opt == 'function') {
+ if (typeof type_opt === 'function') {
assertInstanceof(e, type_opt);
}
if (arguments.length >= 3) {
@@ -339,7 +339,7 @@ var assertUnoptimized;
if (!(obj instanceof type)) {
var actualTypeName = null;
var actualConstructor = Object.getPrototypeOf(obj).constructor;
- if (typeof actualConstructor == "function") {
+ if (typeof actualConstructor === "function") {
actualTypeName = actualConstructor.name || String(actualConstructor);
}
fail("Object <" + PrettyPrint(obj) + "> is not an instance of <" +
@@ -351,7 +351,7 @@ var assertUnoptimized;
assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) {
try {
- if (typeof code == 'function') {
+ if (typeof code === 'function') {
code();
} else {
eval(code);
@@ -386,12 +386,12 @@ var assertUnoptimized;
assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) {
if (sync_opt === undefined) sync_opt = "";
- assertTrue(OptimizationStatus(fun, sync_opt) != 1, name_opt);
+ assertTrue(OptimizationStatus(fun, sync_opt) !== 1, name_opt);
}
assertOptimized = function assertOptimized(fun, sync_opt, name_opt) {
if (sync_opt === undefined) sync_opt = "";
- assertTrue(OptimizationStatus(fun, sync_opt) != 2, name_opt);
+ assertTrue(OptimizationStatus(fun, sync_opt) !== 2, name_opt);
}
})();
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 26ec10ba66..dc96a1de35 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -67,8 +67,10 @@
'array-feedback': [PASS, NO_VARIANTS],
'compare-known-objects-slow': [PASS, NO_VARIANTS],
'elements-kind': [PASS, NO_VARIANTS],
+ 'opt-elements-kind': [PASS, NO_VARIANTS],
# Some tests are just too slow to run for now.
+ 'big-object-literal': [PASS, NO_VARIANTS],
'bit-not': [PASS, NO_VARIANTS],
'json2': [PASS, NO_VARIANTS],
'packed-elements': [PASS, NO_VARIANTS],
@@ -76,52 +78,42 @@
'whitespaces': [PASS, NO_VARIANTS],
'compiler/osr-assert': [PASS, NO_VARIANTS],
'regress/regress-2185-2': [PASS, NO_VARIANTS],
+ 'regress/regress-2612': [PASS, NO_VARIANTS],
+
+ # Modules are busted
+ 'harmony/module-linking': [SKIP],
+ 'harmony/module-recompile': [SKIP],
+ 'harmony/module-resolution': [SKIP],
+ 'harmony/regress/regress-343928': [SKIP],
# Issue 3660: Replacing activated TurboFan frames by unoptimized code does
# not work, but we expect it to not crash.
'debug-step-turbofan': [PASS, FAIL],
- # Support for %GetFrameDetails is missing and requires checkpoints.
- 'debug-evaluate-bool-constructor': [PASS, NO_VARIANTS],
+ # TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
'debug-evaluate-const': [PASS, NO_VARIANTS],
- 'debug-evaluate-locals-optimized-double': [PASS, NO_VARIANTS],
- 'debug-evaluate-locals-optimized': [PASS, NO_VARIANTS],
'debug-evaluate-locals': [PASS, NO_VARIANTS],
- 'debug-evaluate-with-context': [PASS, NO_VARIANTS],
+ 'debug-liveedit-check-stack': [PASS, NO_VARIANTS], # only in no-snap mode.
'debug-liveedit-double-call': [PASS, NO_VARIANTS],
- 'debug-liveedit-restart-frame': [PASS, NO_VARIANTS],
- 'debug-return-value': [PASS, NO_VARIANTS],
- 'debug-set-variable-value': [PASS, NO_VARIANTS],
'debug-step-stub-callfunction': [PASS, NO_VARIANTS],
+ 'debug-set-variable-value': [PASS, NO_VARIANTS],
'debug-stepin-accessor': [PASS, NO_VARIANTS],
'debug-stepin-builtin': [PASS, NO_VARIANTS],
'debug-stepin-constructor': [PASS, NO_VARIANTS],
'debug-stepin-function-call': [PASS, NO_VARIANTS],
'debug-stepnext-do-while': [PASS, NO_VARIANTS],
- 'debug-stepout-recursive-function': [PASS, NO_VARIANTS],
'debug-stepout-scope-part1': [PASS, NO_VARIANTS],
'debug-stepout-scope-part2': [PASS, NO_VARIANTS],
'debug-stepout-scope-part3': [PASS, NO_VARIANTS],
- 'debug-stepout-scope-part7': [PASS, NO_VARIANTS],
- 'debug-stepout-to-builtin': [PASS, NO_VARIANTS],
- 'es6/debug-promises/throw-in-constructor': [PASS, NO_VARIANTS],
- 'es6/debug-promises/reject-in-constructor': [PASS, NO_VARIANTS],
- 'es6/debug-promises/throw-with-undefined-reject': [PASS, NO_VARIANTS],
- 'es6/debug-promises/throw-with-throw-in-reject': [PASS, NO_VARIANTS],
- 'es6/debug-promises/reject-with-throw-in-reject': [PASS, NO_VARIANTS],
- 'es6/debug-promises/throw-uncaught-all': [PASS, NO_VARIANTS],
- 'es6/debug-promises/throw-uncaught-uncaught': [PASS, NO_VARIANTS],
- 'es6/debug-promises/reject-uncaught-late': [PASS, NO_VARIANTS],
- 'es6/debug-promises/throw-caught-by-default-reject-handler': [PASS, NO_VARIANTS],
- 'es6/generators-debug-scopes': [PASS, NO_VARIANTS],
- 'harmony/debug-blockscopes': [PASS, NO_VARIANTS],
- 'regress/regress-1081309': [PASS, NO_VARIANTS],
- 'regress/regress-269': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-259300': [PASS, NO_VARIANTS],
- 'regress/regress-frame-details-null-receiver': [PASS, NO_VARIANTS],
+ 'es6/debug-stepin-microtasks': [PASS, NO_VARIANTS],
+ 'es6/debug-stepnext-for': [PASS, NO_VARIANTS],
+ 'harmony/debug-evaluate-blockscopes': [PASS, NO_VARIANTS],
- # TODO(arv): TurboFan does not yet add [[HomeObject]] as needed.
- 'harmony/object-literals-super': [PASS, NO_VARIANTS],
+ # TODO(jarin): Some tests don't like --turbo-deoptimzation very much.
+ 'asm/embenchen/lua_binarytrees': [SKIP],
+ 'es6/symbols': [PASS, NO_VARIANTS],
+ 'regress/regress-354433': [PASS, NO_VARIANTS], # only on ARM simulator.
+ 'regress/regress-crbug-259300': [PASS, NO_VARIANTS],
##############################################################################
# Too slow in debug mode with --stress-opt mode.
@@ -257,6 +249,12 @@
}], # 'gc_stress == True'
##############################################################################
+['byteorder == big', {
+ # Emscripten requires little-endian, skip all tests on big endian platforms.
+ 'asm/embenchen/*': [SKIP],
+}], # 'byteorder == big'
+
+##############################################################################
['arch == arm64 or arch == android_arm64', {
# arm64 TF timeout.
@@ -445,9 +443,6 @@
['arch == mips', {
# Flaky with TF.
'mirror-script': [PASS, NO_VARIANTS],
-
- # Emscripten requires little-endian, skip all tests on MIPS EB.
- 'asm/embenchen/*': [SKIP],
}], # 'arch == mips'
##############################################################################
@@ -574,6 +569,12 @@
}], # 'arch == nacl_ia32 or arch == nacl_x64'
##############################################################################
+['arch == x87', {
+ # Currently Turbofan is not supported by x87.
+ 'compiler/opt-next-call-turbo': [SKIP],
+}], # 'arch == x87'
+
+##############################################################################
['deopt_fuzzer == True', {
# Skip tests that are not suitable for deoptimization fuzzing.
diff --git a/deps/v8/test/mjsunit/object-literal-multiple-fields.js b/deps/v8/test/mjsunit/object-literal-multiple-fields.js
new file mode 100644
index 0000000000..f36d14d074
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-literal-multiple-fields.js
@@ -0,0 +1,96 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestConstants() {
+ var o = {
+ p: 1,
+ p: 2,
+ };
+ assertEquals(2, o.p);
+})();
+
+
+(function TestMaterialized() {
+ var o = {
+ p: [1],
+ p: [2],
+ };
+ assertEquals(2, o.p[0]);
+})();
+
+
+(function TestMaterialize2() {
+ var o = {
+ p: function() {},
+ p: 2,
+ };
+ assertEquals(2, o.p);
+})();
+
+
+
+(function TestComputed() {
+ var o = {
+ p: (function() { return 1; })(),
+ p: (function() { return 2; })(),
+ };
+ assertEquals(2, o.p);
+})();
+
+
+(function TestComputed2() {
+ var o = {
+ p: (function() { return 1; })(),
+ p: 2,
+ };
+ assertEquals(2, o.p);
+})();
+
+
+
+(function TestGetter() {
+ var o = {
+ get p() { return 1; },
+ get p() { return 2; },
+ };
+ assertEquals(2, o.p);
+})();
+
+
+(function TestGetterSetter() {
+ var o = {
+ get p() { return 1; },
+ set p(_) {},
+ };
+ assertEquals(1, o.p);
+
+ o = {
+ set p(_) {},
+ get p() { return 2; },
+ };
+ assertEquals(2, o.p);
+})();
+
+
+(function TestCombined() {
+ var o = {
+ get p() { return 1; },
+ p: 2,
+ };
+ assertEquals(2, o.p);
+
+ o = {
+ get p() { return 1; },
+ p: 2,
+ get p() { return 3; },
+ };
+ assertEquals(3, o.p);
+
+ o = {
+ get p() { return 1; },
+ p: 2,
+ set p(_) {},
+ };
+ assertEquals(undefined, o.p);
+})();
diff --git a/deps/v8/test/mjsunit/object-literal-multiple-proto-fields.js b/deps/v8/test/mjsunit/object-literal-multiple-proto-fields.js
new file mode 100644
index 0000000000..1ab608bde2
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-literal-multiple-proto-fields.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var p1 = {};
+var p2 = {};
+var p3 = {};
+var x = 0;
+var y = 1;
+var z = 2;
+var o = 3;
+assertThrows(
+ 'o = {' +
+ ' __proto__: (x++, p1),' +
+ ' __proto__: (y++, p2),' +
+ ' __proto__: (z++, p3)' +
+ '};', SyntaxError);
+assertEquals(0, x);
+assertEquals(1, y);
+assertEquals(2, z);
+assertEquals(3, o);
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index 518b984743..389b6dac6f 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -88,11 +88,10 @@ function assertKind(expected, obj, name_opt) {
assertEquals(expected, getKind(obj), name_opt);
}
-// long-running loop forces OSR.
%NeverOptimizeFunction(construct_smis);
%NeverOptimizeFunction(construct_doubles);
%NeverOptimizeFunction(convert_mixed);
-for (var i = 0; i < 1000000; i++) { }
+for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); }
// This code exists to eliminate the learning influence of AllocationSites
// on the following tests.
diff --git a/deps/v8/test/mjsunit/property-name-eval-arguments.js b/deps/v8/test/mjsunit/property-name-eval-arguments.js
new file mode 100644
index 0000000000..ebb07485c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/property-name-eval-arguments.js
@@ -0,0 +1,59 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+(function TestSloppyMode() {
+ var e = 1, a = 2;
+ var o = {
+ get eval() {
+ return e;
+ },
+ set eval(v) {
+ e = v;
+ },
+ get arguments() {
+ return a;
+ },
+ set arguments(v) {
+ a = v;
+ },
+ };
+
+ assertEquals(1, o.eval);
+ o.eval = 3;
+ assertEquals(3, e);
+
+ assertEquals(2, o.arguments);
+ o.arguments = 4;
+ assertEquals(4, a);
+})();
+
+
+(function TestStrictMode() {
+ 'use strict';
+
+ var e = 1, a = 2;
+ var o = {
+ get eval() {
+ return e;
+ },
+ set eval(v) {
+ e = v;
+ },
+ get arguments() {
+ return a;
+ },
+ set arguments(v) {
+ a = v;
+ },
+ };
+
+ assertEquals(1, o.eval);
+ o.eval = 3;
+ assertEquals(3, e);
+
+ assertEquals(2, o.arguments);
+ o.arguments = 4;
+ assertEquals(4, a);
+})();
diff --git a/deps/v8/test/mjsunit/regress-sync-optimized-lists.js b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
index f07c12b2cb..2ce60aa836 100644
--- a/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
+++ b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
@@ -13,9 +13,7 @@ function get_closure() {
return function add_field(obj, osr) {
obj.c = 3;
var x = 0;
- if (osr) {
- %OptimizeFunctionOnNextCall(add_field, "osr");
- }
+ if (osr) %OptimizeOsr();
for (var i = 0; i < 10; i++) {
x = i + 1;
}
diff --git a/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
index fb7280a0d1..8d60e9015e 100644
--- a/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
+++ b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
@@ -31,7 +31,7 @@
function f(a, deopt, osr) {
var result = (a + 10, "result");
var dummy = deopt + 0;
- if (osr) while (%GetOptimizationStatus(f) == 2) {}
+ for (var i = 0; osr && i < 2; i++) %OptimizeOsr();
return result;
}
diff --git a/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
index 9a36c141b7..704af744d2 100644
--- a/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
+++ b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
@@ -31,7 +31,7 @@ function f(deopt, osr) {
var result = "result";
%_CallFunction(0, 0, function() {});
var dummy = deopt + 0;
- if (osr) while (%GetOptimizationStatus(f) == 2) {}
+ for (var i = 0; osr && i < 2; i++) %OptimizeOsr();
return result;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-1118.js b/deps/v8/test/mjsunit/regress/regress-1118.js
index 4fd23456be..05b192d6f3 100644
--- a/deps/v8/test/mjsunit/regress/regress-1118.js
+++ b/deps/v8/test/mjsunit/regress/regress-1118.js
@@ -41,24 +41,10 @@ var o = new A();
// inlined.
function g() { try { return o.f(); } finally { }}
-// Optimization status (see runtime.cc):
-// 1 - yes, 2 - no, 3 - always, 4 - never.
-
// This function should be optimized via OSR.
function h() {
- var optstatus = %GetOptimizationStatus(h);
- if (optstatus == 4) {
- // Optimizations are globally disabled; just run once.
- g();
- } else {
- // Run for a bit as long as h is unoptimized.
- if (%GetOptimizationStatus(h) != 4) {
- while (%GetOptimizationCount(h) == 0) {
- for (var j = 0; j < 100; j++) g();
- }
- }
- g();
- }
+ for (var i = 0; i < 10; i++) %OptimizeOsr();
+ g();
}
h();
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 363557bff8..b3cfffd92c 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -30,9 +30,7 @@
function f() {
do {
do {
- for (var i = 0; i < 10000000; i++) {
- // This should run long enough to trigger OSR.
- }
+ for (var i = 0; i < 10; i++) %OptimizeOsr();
} while (false);
} while (false);
}
@@ -57,7 +55,7 @@ function g() {
do {
do {
do {
- for (var i = 0; i < 10000000; i++) { }
+ for (var i = 0; i < 10; i++) %OptimizeOsr();
} while (false);
} while (false);
} while (false);
diff --git a/deps/v8/test/mjsunit/regress/regress-2825.js b/deps/v8/test/mjsunit/regress/regress-2825.js
new file mode 100644
index 0000000000..34348c911b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2825.js
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Do not edit this file with an editor that replaces \r with \r\n.
+// Variable definitions for i0 through i3 are each terminated with \r.
+function f() {
+ var i0 = 0; var i1 = 1; var i2 = 2; var i3 = 3;
+ var j0 = 0;
+ var j1 = 1;
+ var j2 = 2;
+ var j3 = 3;
+}
+
+Debug = debug.Debug;
+var exception = null;
+var break_point_hit = false;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ break_point_hit = true;
+ assertEquals(" var i2 = 2;", exec_state.frame(0).sourceLineText());
+ } catch (e) {
+ print(e + e.stack);
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+Debug.setBreakPoint(f, 3, 0);
+
+f();
+
+Debug.setListener(null);
+assertTrue(break_point_hit);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-3032.js b/deps/v8/test/mjsunit/regress/regress-3032.js
index ae54543758..9b18e146ce 100644
--- a/deps/v8/test/mjsunit/regress/regress-3032.js
+++ b/deps/v8/test/mjsunit/regress/regress-3032.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-for (var i = 0; i < 1000000; i++) { }
+// Flags: --allow-natives-syntax
+
+for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); }
var xl = 4096;
var z = i % xl;
diff --git a/deps/v8/test/mjsunit/regress/regress-3501.js b/deps/v8/test/mjsunit/regress/regress-3501.js
new file mode 100644
index 0000000000..4b449e458f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3501.js
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-arrow-functions
+
+// See: http://code.google.com/p/v8/issues/detail?id=3501
+
+"use strict";
+let lift = f => (x, k) => k (f (x));
+lift(isNaN);
diff --git a/deps/v8/test/mjsunit/regress/regress-379770.js b/deps/v8/test/mjsunit/regress/regress-379770.js
index a6653c2591..ab1b339f7d 100644
--- a/deps/v8/test/mjsunit/regress/regress-379770.js
+++ b/deps/v8/test/mjsunit/regress/regress-379770.js
@@ -6,9 +6,7 @@
function foo(obj) {
var counter = 1;
- for (var i = 0; i < obj.length; i++) {
- %OptimizeFunctionOnNextCall(foo, "osr");
- }
+ for (var i = 0; i < obj.length; i++) %OptimizeOsr();
counter += obj;
return counter;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-3859.js b/deps/v8/test/mjsunit/regress/regress-3859.js
new file mode 100644
index 0000000000..3248ef14ac
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3859.js
@@ -0,0 +1,6 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(1, new Set([NaN, NaN, NaN]).size);
+assertEquals(42, new Map([[NaN, 42]]).get(NaN));
diff --git a/deps/v8/test/mjsunit/regress/regress-3865.js b/deps/v8/test/mjsunit/regress/regress-3865.js
new file mode 100644
index 0000000000..0d1d02f00d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3865.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar() {
+ var radix = 10;
+ return 21 / radix | 0;
+}
+assertEquals(2, bar());
+assertEquals(2, bar());
+%OptimizeFunctionOnNextCall(bar);
+assertEquals(2, bar());
diff --git a/deps/v8/test/mjsunit/regress/regress-3884.js b/deps/v8/test/mjsunit/regress/regress-3884.js
new file mode 100644
index 0000000000..ecd000f6c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3884.js
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function f(x) {
+ // TurboFan will hoist the CompareIC for x === 'some_string' and spill it.
+ if (x === 'some_other_string_1' || x === 'some_string') {
+ gc();
+ }
+ if (x === 'some_other_string_2' || x === 'some_string') {
+ gc();
+ }
+ // TurboFan will hoist the CompareIC for x === 1.4 and spill it.
+ if (x === 1.7 || x === 1.4) {
+ gc();
+ }
+ if (x === 1.9 || x === 1.4) {
+ gc();
+ }
+}
+
+%OptimizeFunctionOnNextCall(f);
+
+f('some_other_string_1');
+f(1.7);
diff --git a/deps/v8/test/mjsunit/regress/regress-437713.js b/deps/v8/test/mjsunit/regress/regress-437713.js
new file mode 100644
index 0000000000..704dd3ed8a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-437713.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --enable-slow-asserts
+
+var o1 = {
+ a00:0, a01:0, a02:0, a03:0, a04:0, a05:0, a06:0, a07:0, a08:0, a09:0, a0a:0, a0b:0, a0c:0, a0d:0, a0e:0, a0f:0,
+ a10:0, a11:0, a12:0, a13:0, a14:0, a15:0, a16:0, a17:0, a18:0, a19:0, a1a:0, a1b:0, a1c:0, a1d:0, a1e:0, a1f:0,
+
+ dbl: 0.1,
+
+ some_double: 2.13,
+};
+
+var o2 = {
+ a00:0, a01:0, a02:0, a03:0, a04:0, a05:0, a06:0, a07:0, a08:0, a09:0, a0a:0, a0b:0, a0c:0, a0d:0, a0e:0, a0f:0,
+ a10:0, a11:0, a12:0, a13:0, a14:0, a15:0, a16:0, a17:0, a18:0, a19:0, a1a:0, a1b:0, a1c:0, a1d:0, a1e:0, a1f:0,
+
+ dbl: 0.1,
+
+ boom: [],
+};
+
+o2.boom.push(42);
+assertEquals(42, o2.boom[0]);
diff --git a/deps/v8/test/mjsunit/regress/regress-444805.js b/deps/v8/test/mjsunit/regress/regress-444805.js
new file mode 100644
index 0000000000..5a533acd5e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-444805.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+try {
+ load("test/mjsunit/regress/regress-444805.js-script");
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-444805.js-script b/deps/v8/test/mjsunit/regress/regress-444805.js-script
new file mode 100644
index 0000000000..17b233b561
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-444805.js-script
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = function(dummyObject, v8StackTrace)
+{
+ throw new Error('boom');
+};
+
+
+throw new Error('just error');
diff --git a/deps/v8/test/mjsunit/regress/regress-446389.js b/deps/v8/test/mjsunit/regress/regress-446389.js
new file mode 100644
index 0000000000..d6006387e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-446389.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function runNearStackLimit(f) { function t() { try { t(); } catch(e) { f(); } }; try { t(); } catch(e) {} }
+%OptimizeFunctionOnNextCall(__f_3);
+function __f_3() {
+ var __v_5 = a[0];
+}
+runNearStackLimit(function() { __f_3(); });
diff --git a/deps/v8/test/mjsunit/regress/regress-447526.js b/deps/v8/test/mjsunit/regress/regress-447526.js
new file mode 100644
index 0000000000..9f9396f2ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-447526.js
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar() {
+ throw "done";
+}
+
+function foo() {
+ var i;
+ while (i) {
+ while (i) {
+}
+ i++;
+ }
+ while (true) {
+ bar();
+ }
+}
+
+
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-447561.js b/deps/v8/test/mjsunit/regress/regress-447561.js
new file mode 100644
index 0000000000..0d7a321de0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-447561.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__proto__ = /foo/gi;
+assertEquals("foo", source);
+assertTrue(global);
+assertTrue(ignoreCase);
+assertFalse(multiline);
+assertEquals(0, lastIndex);
diff --git a/deps/v8/test/mjsunit/regress/regress-448711.js b/deps/v8/test/mjsunit/regress/regress-448711.js
new file mode 100644
index 0000000000..b7628ab2eb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-448711.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function f() {
+ this.a = { text: "Hello!" };
+}
+var v4 = new f();
+var v7 = new f();
+v7.b = {};
+Object.defineProperty(v4, '2', {});
+var v6 = new f();
+v6.a = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-449070.js b/deps/v8/test/mjsunit/regress/regress-449070.js
new file mode 100644
index 0000000000..7a0f0a838c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-449070.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+try {
+ %NormalizeElements(this);
+} catch(e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-449291.js b/deps/v8/test/mjsunit/regress/regress-449291.js
new file mode 100644
index 0000000000..fb56027b67
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-449291.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+a = {y:1.5};
+a.y = 1093445778;
+b = a.y;
+c = {y:{}};
+
+function f() {
+ return {y: b};
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(f().y, 1093445778);
diff --git a/deps/v8/test/mjsunit/regress/regress-450895.js b/deps/v8/test/mjsunit/regress/regress-450895.js
new file mode 100644
index 0000000000..48aa00d956
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-450895.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var v = new Array();
+Object.freeze(v);
+v = v.concat(0.5);
diff --git a/deps/v8/test/mjsunit/regress/regress-451322.js b/deps/v8/test/mjsunit/regress/regress-451322.js
new file mode 100644
index 0000000000..b7794f52f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-451322.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var foo = 0;
+
+function bar() {
+ var baz = 0 - {};
+ if (foo > 24) return baz * 0;
+}
+
+bar();
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-451958.js b/deps/v8/test/mjsunit/regress/regress-451958.js
new file mode 100644
index 0000000000..33695f2b3e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-451958.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function k() { throw "e"; }
+var a = true;
+var a = false;
+function foo(a) {
+ var i, j;
+ if (a) {
+ for (i = 0; i < 1; j++) ;
+ for (i = 0; i < 1; k()) ;
+ for (i = 0; i < 1; i++) ;
+ }
+}
+%OptimizeFunctionOnNextCall(foo);
+foo();
+
+function bar() {
+var __v_45;
+ for (__v_45 = 0; __v_45 < 64; __v_63++) {
+ }
+ for (__v_45 = 0; __v_45 < 128; __v_36++) {
+ }
+ for (__v_45 = 128; __v_45 < 256; __v_45++) {
+ }
+}
+%OptimizeFunctionOnNextCall(bar);
+assertThrows(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-453481.js b/deps/v8/test/mjsunit/regress/regress-453481.js
new file mode 100644
index 0000000000..2bc9e46433
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-453481.js
@@ -0,0 +1,127 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --always-opt
+
+var __v_0 = "";
+var __v_1 = {};
+var __v_2 = {};
+var __v_3 = {};
+var __v_4 = {};
+var __v_5 = {};
+var __v_6 = {};
+var __v_7 = {};
+var __v_8 = {};
+var __v_10 = {};
+var __v_13 = {};
+var __v_15 = {};
+var __v_16 = /abc/;
+var __v_17 = {};
+var __v_18 = function() {};
+var __v_19 = this;
+var __v_20 = {};
+var __v_21 = this;
+
+function __f_5(s) {
+ return __f_11(__f_3(__f_7(s), s.length * 8));
+}
+function __f_3(x, len) {
+ var __v_3 = 1732584193;
+ var __v_6 = -271733879;
+ var __v_5 = -1732584194;
+ var __v_7 = 271733892;
+
+ for (var i = 0; i < 1; i++) {
+ var __v_11 = __v_3;
+ var __v_14 = __v_6;
+ var __v_13 = __v_5;
+ var __v_15 = __v_7;
+
+ __v_3 = __f_10(__v_3, __v_6, __v_5, __v_7, x[__v_8+ 0], 6 , -198630844);
+ __v_7 = __f_10(__v_7, __v_3, __v_6, __v_5, x[__v_8+ 7], 10, 1126891415);
+ __v_5 = __f_10(__v_5, __v_7, __v_3, __v_6, x[__v_8+14], 15, -1416354905);
+ __v_6 = __f_10(__v_6, __v_5, __v_7, __v_3, x[__v_8+ 5], 21, -57434055);
+ __v_3 = __f_10(__v_3, __v_6, __v_5, __v_7, x[__v_8+12], 6 , 1700485571);
+ __v_7 = __f_10(__v_7, __v_3, __v_6, __v_5, x[__v_8+ 3], 10, -1894986606);
+ __v_5 = __f_10(__v_5, __v_7, __v_3, __v_6, x[__v_8+10], 15, -1051523);
+ __v_6 = __f_10(__v_6, __v_5, __v_7, __v_3, x[__v_8+ 1], 21, -2054922799);
+ __v_3 = __f_10(__v_3, __v_6, __v_5, __v_7, x[__v_8+ 8], 6 , 1873313359);
+ __v_7 = __f_10(__v_7, __v_3, __v_6, __v_5, x[__v_8+15], 10, -30611744);
+ __v_5 = __f_10(__v_5, __v_7, __v_3, __v_6, x[__v_8+ 22], 14, -1560198371);
+ __v_3 = __f_10(__v_3, __v_6, __v_5, __v_7, x[__v_8+ 4], 6 , -145523070);
+ __v_7 = __f_10(__v_7, __v_3, __v_6, __v_5, x[__v_8+11], 10, -1120210379);
+ __v_5 = __f_10(__v_5, __v_7, __v_3, __v_6, x[__v_8+ 2], 15, 718787259);
+ __v_6 = __f_10(__v_13, __v_5, __v_7, __v_3, x[__v_8+ 9], 21, -343485551);
+ __v_3 = __f_6(__v_3, __v_11);
+ __v_6 = __f_6(__v_6, __v_14);
+ __v_5 = __f_6(__v_5, __v_13);
+ __v_7 = __f_6(__v_7, __v_15);
+
+ }
+
+ return Array(__v_3, __v_13, __v_4, __v_19);
+}
+function __f_4(q, __v_3, __v_6, x, s, t) {
+ return __f_6(__f_12(__f_6(__f_6(__v_3, q), __f_6(x, t)), s),__v_6);
+}
+function __f_13(__v_3, __v_6, __v_5, __v_7, x, s, t) {
+ return __f_4((__v_6 & __v_5) | ((~__v_6) & __v_7), __v_3, __v_6, x, s, t);
+}
+function __f_8(__v_3, __v_6, __v_5, __v_7, x, s, t) {
+ return __f_4((__v_6 & __v_7) | (__v_5 & (~__v_7)), __v_3, __v_6, x, s, t);
+}
+function __f_9(__v_3, __v_6, __v_5, __v_7, x, s, t) {
+ return __f_4(__v_6 ^ __v_5 ^ __v_7, __v_3, __v_6, x, s, t);
+}
+function __f_10(__v_3, __v_6, __v_5, __v_7, x, s, t) {
+ return __f_4(__v_5 ^ (__v_6 | (~__v_7)), __v_3, __v_6, x, s, t);
+}
+function __f_6(x, y) {
+ var __v_12 = (x & 0xFFFF) + (y & 0xFFFF);
+ var __v_18 = (x >> 16) + (y >> 16) + (__v_12 >> 16);
+ return (__v_18 << 16) | (__v_12 & 0xFFFF);
+}
+function __f_12(num, cnt) {
+ return (num << cnt) | (num >>> (32 - cnt));
+}
+function __f_7(__v_16) {
+ var __v_4 = Array();
+ var __v_9 = (1 << 8) - 1;
+ for(var __v_8 = 0; __v_8 < __v_16.length * 8; __v_8 += 8)
+ __v_4[__v_8>>5] |= (__v_16.charCodeAt(__v_8 / 8) & __v_9) << (__v_8%32);
+ return __v_4;
+}
+
+function __f_11(binarray) { return __v_16; }
+
+try {
+__v_10 = "Rebellious subjects, enemies to peace,\n\
+Profaners of this neighbour-stained steel,--\n\
+Will they not hear? What, ho! you men, you beasts,\n\
+That quench the fire of your pernicious rage\n\
+With purple fountains issuing from your veins,\n\
+On pain of torture, from those bloody hands\n\
+Throw your mistemper'__v_7 weapons to the ground,\n\
+And hear the sentence of your moved prince.\n\
+Three civil brawls, bred of an airy word,\n\
+By thee, old Capulet, and Montague,\n\
+Have thrice disturb'__v_7 the quiet of our streets,\n\
+And made Verona's ancient citizens\n\
+Cast by their grave beseeming ornaments,\n\
+To wield old partisans, in hands as old,\n\
+Canker'__v_7 with peace, to part your canker'__v_7 hate:\n\
+If ever you disturb our streets again,\n\
+Your lives shall pay the forfeit of the peace.\n\
+For this time, all the rest depart away:\n\
+You Capulet; shall go along with me:\n\
+And, Montague, come you this afternoon,\n\
+To know our further pleasure in this case,\n\
+To old Free-town, our common judgment-place.\n\
+Once more, on pain of death, all men depart.\n"
+ function assertEquals(a, b) { }
+for (var __v_8 = 0; __v_8 < 11; ++__v_8) {
+ assertEquals(__f_5(__v_10), "1b8719c72d5d8bfd06e096ef6c6288c5");
+}
+
+} catch(e) { print("Caught: " + e); }
diff --git a/deps/v8/test/mjsunit/regress/regress-454725.js b/deps/v8/test/mjsunit/regress/regress-454725.js
new file mode 100644
index 0000000000..a2469d11a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-454725.js
@@ -0,0 +1,42 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-gc
+
+var __v_9 = {};
+var depth = 15;
+var current = 0;
+
+function __f_15(__v_3) {
+ if ((__v_3 % 50) != 0) {
+ return __v_3;
+ } else {
+ return __v_9 + 0.5;
+ }
+}
+function __f_13(a) {
+ a[100000 - 2] = 1;
+ for (var __v_3= 0; __v_3 < 70000; ++__v_3 ) {
+ a[__v_3] = __f_15(__v_3);
+ }
+}
+function __f_2(size) {
+
+}
+var tmp;
+function __f_18(allocator) {
+ current++;
+ if (current == depth) return;
+ var __v_7 = new allocator(100000);
+ __f_13(__v_7);
+ var __v_4 = 6;
+ for (var __v_3= 0; __v_3 < 70000; __v_3 += 501 ) {
+ tmp += __v_3;
+ }
+ __f_18(Array);
+ current--;
+}
+
+gc();
+__f_18(__f_2);
diff --git a/deps/v8/test/mjsunit/regress/regress-634.js b/deps/v8/test/mjsunit/regress/regress-455212.js
index b68e843740..f2fd033c3d 100644
--- a/deps/v8/test/mjsunit/regress/regress-634.js
+++ b/deps/v8/test/mjsunit/regress/regress-455212.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,8 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-for (var i = 0; i < 1000000; i++) {
- a = new Array(0);
- assertEquals(0, a.length);
- assertEquals(0, a.length);
-}
+// Typeof expression must resolve to 'undefined' when it used on a
+// non-existing property. It is *not* allowed to throw a
+// ReferenceError.
+
+// eval("\u0060\u005c") is an unterminated template string (\u0060)
+assertThrows("\u0060\u005c", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-457935.js b/deps/v8/test/mjsunit/regress/regress-457935.js
new file mode 100644
index 0000000000..d34db05de4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-457935.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function dummy(x) { };
+
+function g() {
+ return g.arguments;
+}
+
+function f(limit) {
+ var i = 0;
+ var o = {};
+ for (; i < limit; i++) {
+ o.y = +o.y;
+ g();
+ }
+}
+
+f(1);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+dummy(f(1));
+dummy(f(2));
diff --git a/deps/v8/test/mjsunit/regress/regress-458876.js b/deps/v8/test/mjsunit/regress/regress-458876.js
new file mode 100644
index 0000000000..7df0615d64
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-458876.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function module() {
+ "use asm";
+ function foo() {
+ do ; while (foo ? 0 : 1) ;
+ return -1 > 0 ? -1 : 0;
+ }
+ return foo;
+}
+
+var foo = module();
+assertEquals(0, foo());
+assertEquals(0, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-458987.js b/deps/v8/test/mjsunit/regress/regress-458987.js
new file mode 100644
index 0000000000..f7a7edcef4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-458987.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function () {
+ "use asm";
+
+ function g() {}
+
+ runNearStackLimit(g);
+})();
+
+function runNearStackLimit(f) {
+ function g() { try { g(); } catch(e) { f(); } };
+ g();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-459955.js b/deps/v8/test/mjsunit/regress/regress-459955.js
new file mode 100644
index 0000000000..24eff6d40c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-459955.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(x) {
+ var v;
+ if (x) v = 0;
+ return v <= 1;
+}
+assertFalse(f(false));
diff --git a/deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js b/deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js
new file mode 100644
index 0000000000..2a30dc87a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ return f.arguments;
+}
+
+function g(deopt) {
+ var o = { x : 2 };
+ f();
+ o.x = 1;
+ deopt + 0;
+ return o.x;
+}
+
+g(0);
+g(0);
+%OptimizeFunctionOnNextCall(g);
+assertEquals(1, g({}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
index 8238d2fa0d..cfee0618ec 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
@@ -45,10 +45,7 @@
function outer() {
inner(1,2,3);
- // Trigger OSR, if optimization is not disabled.
- if (%GetOptimizationStatus(outer) != 4) {
- while (%GetOptimizationCount(outer) == 0) {}
- }
+ for (var i = 0; i < 3; i++) %OptimizeOsr();
}
outer();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-387599.js b/deps/v8/test/mjsunit/regress/regress-crbug-387599.js
index 98750aa918..753dcfa3a6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-387599.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-387599.js
@@ -8,9 +8,7 @@ Debug = debug.Debug;
Debug.setListener(function() {});
function f() {
- for (var i = 0; i < 100; i++) {
- %OptimizeFunctionOnNextCall(f, "osr");
- }
+ for (var i = 0; i < 100; i++) %OptimizeOsr();
}
Debug.setBreakPoint(f, 0, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-448730.js b/deps/v8/test/mjsunit/regress/regress-crbug-448730.js
new file mode 100644
index 0000000000..31d276aa83
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-448730.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-proxies
+
+function bar() {}
+bar({ a: Proxy.create({}) });
+function foo(x) { x.a.b == ""; }
+var x = {a: {b: "" }};
+foo(x);
+foo(x);
+%OptimizeFunctionOnNextCall(foo);
+foo(x);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-450642.js b/deps/v8/test/mjsunit/regress/regress-crbug-450642.js
new file mode 100644
index 0000000000..7f821e0ffc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-450642.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(function() { with (undefined) {} }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-450960.js b/deps/v8/test/mjsunit/regress/regress-crbug-450960.js
new file mode 100644
index 0000000000..f745522dbe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-450960.js
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=70
+
+"a".replace(/a/g, "");
+
+function test() {
+ try {
+ test();
+ } catch(e) {
+ "b".replace(/(b)/g, new []);
+ }
+}
+
+try {
+ test();
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-451013.js b/deps/v8/test/mjsunit/regress/regress-crbug-451013.js
new file mode 100644
index 0000000000..d843d3334b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-451013.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(function testDeepArrayLiteral() {
+ testDeepArrayLiteral([], [], [[]]);
+}, RangeError);
+
+assertThrows(function testDeepObjectLiteral() {
+ testDeepObjectLiteral({}, {}, {x:[[]]});
+}, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-451016.js b/deps/v8/test/mjsunit/regress/regress-crbug-451016.js
new file mode 100644
index 0000000000..93152c3665
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-451016.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-filter=STRICT_EQUALS
+
+var value = NaN;
+for (i = 0; i < 256; i++) {
+ value === "A" || value === "B";
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-451770.js b/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
new file mode 100644
index 0000000000..942814a316
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-computed-property-names --harmony-classes --harmony-sloppy
+
+assertThrows(function f() {
+ var t = { toString: function() { throw new Error(); } };
+ var o = { [t]: 23 };
+}, Error);
+
+assertThrows(function f() {
+ var t = { toString: function() { throw new Error(); } };
+ class C { [t]() { return 23; } };
+}, Error);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-454091.js b/deps/v8/test/mjsunit/regress/regress-crbug-454091.js
new file mode 100644
index 0000000000..2705e96bfd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-454091.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+this.__proto__ = Array.prototype;
+Object.freeze(this);
+this.length = 1;
+assertThrows('this.__proto__ = {}');
+assertEquals(Array.prototype, this.__proto__);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-455644.js b/deps/v8/test/mjsunit/regress/regress-crbug-455644.js
new file mode 100644
index 0000000000..4993d857a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-455644.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function f() {
+ do { return 23; } while(false);
+ with (0) {
+ try {
+ return 42;
+ } finally {}
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js b/deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js
new file mode 100644
index 0000000000..ed63133c0f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = { };
+o.__defineGetter__("progressChanged", function() { %DeoptimizeFunction(f); return 10; })
+
+function g(a, b, c) {
+ return a + b + c;
+}
+
+function f() {
+ var t="progressChanged";
+ return g(1, o[t], 100);
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(111, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-merge-descriptors.js b/deps/v8/test/mjsunit/regress/regress-merge-descriptors.js
index a84a6254a0..98e2f26674 100644
--- a/deps/v8/test/mjsunit/regress/regress-merge-descriptors.js
+++ b/deps/v8/test/mjsunit/regress/regress-merge-descriptors.js
@@ -77,7 +77,7 @@ var SuperCar = ((function (Super) {
}
});
- // Convert self.copy from CONSTANT to FIELD.
+ // Convert self.copy from DATA_CONSTANT to DATA.
self.copy = function () { };
return self;
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-nan.js b/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
new file mode 100644
index 0000000000..93106c5eb1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function loader(dst, src, i) {
+ dst[i] = src[i];
+}
+
+var ab = new ArrayBuffer(8);
+var i_view = new Int32Array(ab);
+i_view[0] = 0xFFF7FFFF;
+i_view[1] = 0xFFF7FFFF;
+var f_view = new Float64Array(ab);
+
+var fixed_double_elements = new Float64Array(1);
+
+function opt_store() { fixed_double_elements[0] = f_view[0]; }
+
+opt_store();
+opt_store();
+%OptimizeFunctionOnNextCall(opt_store);
+opt_store();
+
+var i32 = new Int32Array(fixed_double_elements.buffer);
+assertEquals(i_view[0], i32[0]);
+assertEquals(i_view[1], i32[1]);
+
+var doubles = [0.5];
+loader(doubles, fixed_double_elements, 0);
+loader(doubles, fixed_double_elements, 0);
+%OptimizeFunctionOnNextCall(loader);
+loader(doubles, fixed_double_elements, 0);
+assertTrue(doubles[0] !== undefined);
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-nan2.js b/deps/v8/test/mjsunit/regress/regress-undefined-nan2.js
new file mode 100644
index 0000000000..9b0a05f8fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-nan2.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(a, i) {
+ var o = [0.5,,1];
+ a[i] = o[i];
+}
+var a1 = [0.1,0.1];
+foo(a1, 0);
+foo(a1, 1);
+assertEquals(undefined, a1[1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js b/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
new file mode 100644
index 0000000000..636b38a110
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
@@ -0,0 +1,32 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var ab = new ArrayBuffer(8);
+var i_view = new Int32Array(ab);
+i_view[0] = 0xFFF7FFFF;
+i_view[1] = 0xFFF7FFFF;
+var f_view = new Float64Array(ab);
+
+var fixed_double_elements = new Float64Array(1);
+fixed_double_elements[0] = f_view[0];
+
+function A(src) { this.x = src[0]; }
+
+new A(fixed_double_elements);
+new A(fixed_double_elements);
+
+%OptimizeFunctionOnNextCall(A);
+
+var obj = new A(fixed_double_elements);
+
+function move_x(dst, obj) { dst[0] = obj.x; }
+
+var doubles = [0.5];
+move_x(doubles, obj);
+move_x(doubles, obj);
+%OptimizeFunctionOnNextCall(move_x);
+move_x(doubles, obj);
+assertTrue(doubles[0] !== undefined);
diff --git a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
index c8e8538e16..03100a3505 100644
--- a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
+++ b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
@@ -36,7 +36,7 @@
var world = " world";
%_OneByteSeqStringSetChar(0, (deopt(), 0x48), string);
- if (osr) while (%GetOptimizationStatus(f) == 2) {}
+ for (var i = 0; osr && i < 2; i++) %OptimizeOsr();
return string + world;
}
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index 62d003f9fa..d0839ba0fb 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -169,18 +169,6 @@ assertThrows('\
"use strict";\
}', SyntaxError);
-// Duplicate data properties.
-CheckStrictMode("var x = { dupe : 1, nondupe: 3, dupe : 2 };", SyntaxError);
-CheckStrictMode("var x = { '1234' : 1, '2345' : 2, '1234' : 3 };", SyntaxError);
-CheckStrictMode("var x = { '1234' : 1, '2345' : 2, 1234 : 3 };", SyntaxError);
-CheckStrictMode("var x = { 3.14 : 1, 2.71 : 2, 3.14 : 3 };", SyntaxError);
-CheckStrictMode("var x = { 3.14 : 1, '3.14' : 2 };", SyntaxError);
-CheckStrictMode("var x = { \
- 123: 1, \
- 123.00000000000000000000000000000000000000000000000000000000000000000001: 2 \
-}", SyntaxError);
-
-// Non-conflicting data properties.
(function StrictModeNonDuplicate() {
"use strict";
var x = { 123 : 1, "0123" : 2 };
@@ -191,37 +179,50 @@ CheckStrictMode("var x = { \
};
})();
-// Two getters (non-strict)
-assertThrows("var x = { get foo() { }, get foo() { } };", SyntaxError);
-assertThrows("var x = { get foo(){}, get 'foo'(){}};", SyntaxError);
-assertThrows("var x = { get 12(){}, get '12'(){}};", SyntaxError);
-
-// Two setters (non-strict)
-assertThrows("var x = { set foo(v) { }, set foo(v) { } };", SyntaxError);
-assertThrows("var x = { set foo(v) { }, set 'foo'(v) { } };", SyntaxError);
-assertThrows("var x = { set 13(v) { }, set '13'(v) { } };", SyntaxError);
-
-// Setter and data (non-strict)
-assertThrows("var x = { foo: 'data', set foo(v) { } };", SyntaxError);
-assertThrows("var x = { set foo(v) { }, foo: 'data' };", SyntaxError);
-assertThrows("var x = { foo: 'data', set 'foo'(v) { } };", SyntaxError);
-assertThrows("var x = { set foo(v) { }, 'foo': 'data' };", SyntaxError);
-assertThrows("var x = { 'foo': 'data', set foo(v) { } };", SyntaxError);
-assertThrows("var x = { set 'foo'(v) { }, foo: 'data' };", SyntaxError);
-assertThrows("var x = { 'foo': 'data', set 'foo'(v) { } };", SyntaxError);
-assertThrows("var x = { set 'foo'(v) { }, 'foo': 'data' };", SyntaxError);
-assertThrows("var x = { 12: 1, set '12'(v){}};", SyntaxError);
-assertThrows("var x = { 12: 1, set 12(v){}};", SyntaxError);
-assertThrows("var x = { '12': 1, set '12'(v){}};", SyntaxError);
-assertThrows("var x = { '12': 1, set 12(v){}};", SyntaxError);
-
-// Getter and data (non-strict)
-assertThrows("var x = { foo: 'data', get foo() { } };", SyntaxError);
-assertThrows("var x = { get foo() { }, foo: 'data' };", SyntaxError);
-assertThrows("var x = { 'foo': 'data', get foo() { } };", SyntaxError);
-assertThrows("var x = { get 'foo'() { }, 'foo': 'data' };", SyntaxError);
-assertThrows("var x = { '12': 1, get '12'(){}};", SyntaxError);
-assertThrows("var x = { '12': 1, get 12(){}};", SyntaxError);
+// Duplicate data properties are allowed in ES6
+(function StrictModeDuplicateES6() {
+ 'use strict';
+ var x = {
+ 123: 1,
+ 123.00000000000000000000000000000000000000000000000000000000000000000001: 2
+ };
+ var x = { dupe : 1, nondupe: 3, dupe : 2 };
+ var x = { '1234' : 1, '2345' : 2, '1234' : 3 };
+ var x = { '1234' : 1, '2345' : 2, 1234 : 3 };
+ var x = { 3.14 : 1, 2.71 : 2, 3.14 : 3 };
+ var x = { 3.14 : 1, '3.14' : 2 };
+
+ var x = { get foo() { }, get foo() { } };
+ var x = { get foo(){}, get 'foo'(){}};
+ var x = { get 12(){}, get '12'(){}};
+
+ // Two setters
+ var x = { set foo(v) { }, set foo(v) { } };
+ var x = { set foo(v) { }, set 'foo'(v) { } };
+ var x = { set 13(v) { }, set '13'(v) { } };
+
+ // Setter and data
+ var x = { foo: 'data', set foo(v) { } };
+ var x = { set foo(v) { }, foo: 'data' };
+ var x = { foo: 'data', set 'foo'(v) { } };
+ var x = { set foo(v) { }, 'foo': 'data' };
+ var x = { 'foo': 'data', set foo(v) { } };
+ var x = { set 'foo'(v) { }, foo: 'data' };
+ var x = { 'foo': 'data', set 'foo'(v) { } };
+ var x = { set 'foo'(v) { }, 'foo': 'data' };
+ var x = { 12: 1, set '12'(v){}};
+ var x = { 12: 1, set 12(v){}};
+ var x = { '12': 1, set '12'(v){}};
+ var x = { '12': 1, set 12(v){}};
+
+ // Getter and data
+ var x = { foo: 'data', get foo() { } };
+ var x = { get foo() { }, foo: 'data' };
+ var x = { 'foo': 'data', get foo() { } };
+ var x = { get 'foo'() { }, 'foo': 'data' };
+ var x = { '12': 1, get '12'(){}};
+ var x = { '12': 1, get 12(){}};
+})();
// Assignment to eval or arguments
CheckStrictMode("function strict() { eval = undefined; }", SyntaxError);
@@ -291,7 +292,7 @@ CheckStrictMode("const x = 0;", SyntaxError);
CheckStrictMode("for (const x = 0; false;) {}", SyntaxError);
CheckStrictMode("function strict() { const x = 0; }", SyntaxError);
-// Strict mode only allows functions in SourceElements
+// Strict mode only allows functions in StatementList
CheckStrictMode("if (true) { function invalid() {} }", SyntaxError);
CheckStrictMode("for (;false;) { function invalid() {} }", SyntaxError);
CheckStrictMode("{ function invalid() {} }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/strong/classes.js b/deps/v8/test/mjsunit/strong/classes.js
new file mode 100644
index 0000000000..3c7caf5f84
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/classes.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+'use strong';
+
+class C {}
+
+(function ImmutableClassBindings() {
+ class D {}
+ assertThrows(function(){ eval("C = 0") }, TypeError);
+ assertThrows(function(){ eval("D = 0") }, TypeError);
+ assertEquals('function', typeof C);
+ assertEquals('function', typeof D);
+})();
diff --git a/deps/v8/test/mjsunit/strong/delete.js b/deps/v8/test/mjsunit/strong/delete.js
new file mode 100644
index 0000000000..349af0bf39
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/delete.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+(function NoDelete() {
+ const o = {a: 0};
+ assertThrows("'use strong'; delete o.a", SyntaxError);
+ assertThrows("'use strong'; delete o", SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/strong/empty-statement.js b/deps/v8/test/mjsunit/strong/empty-statement.js
new file mode 100644
index 0000000000..65edf74733
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/empty-statement.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+(function NoEmptySubStatement() {
+ assertThrows("'use strong'; if (1);", SyntaxError);
+ assertThrows("'use strong'; if (1) {} else;", SyntaxError);
+ assertThrows("'use strong'; while (1);", SyntaxError);
+ assertThrows("'use strong'; do; while (1);", SyntaxError);
+ assertThrows("'use strong'; for (;;);", SyntaxError);
+ assertThrows("'use strong'; for (x in []);", SyntaxError);
+ assertThrows("'use strong'; for (x of []);", SyntaxError);
+ assertThrows("'use strong'; for (let x;;);", SyntaxError);
+ assertThrows("'use strong'; for (let x in []);", SyntaxError);
+ assertThrows("'use strong'; for (let x of []);", SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/strong/equality.js b/deps/v8/test/mjsunit/strong/equality.js
new file mode 100644
index 0000000000..5e2464c372
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/equality.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+(function NoSloppyEquality() {
+ assertThrows("'use strong'; 0 == 0", SyntaxError);
+ assertThrows("'use strong'; 0 != 0", SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/strong/for-in.js b/deps/v8/test/mjsunit/strong/for-in.js
new file mode 100644
index 0000000000..8fa9010202
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/for-in.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+(function NoForInStatement() {
+ assertThrows("'use strong'; for (x in []) {}", SyntaxError);
+ assertThrows("'use strong'; for (let x in []) {}", SyntaxError);
+ assertThrows("'use strong'; for (const x in []) {}", SyntaxError);
+})();
+
+(function ForOfStatement() {
+ assertTrue(eval("'use strong'; for (x of []) {} true"));
+ assertTrue(eval("'use strong'; for (let x of []) {} true"));
+ assertTrue(eval("'use strong'; for (const x of []) {} true"));
+})();
diff --git a/deps/v8/test/mjsunit/strong/functions.js b/deps/v8/test/mjsunit/strong/functions.js
new file mode 100644
index 0000000000..4869ac6dfa
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/functions.js
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+'use strong';
+
+(function NoArguments() {
+ assertThrows("'use strong'; arguments", SyntaxError);
+ assertThrows("'use strong'; function f() { arguments }", SyntaxError);
+ assertThrows("'use strong'; let f = function() { arguments }", SyntaxError);
+ assertThrows("'use strong'; let f = () => arguments", SyntaxError);
+ // The following are strict mode errors already.
+ assertThrows("'use strong'; let arguments", SyntaxError);
+ assertThrows("'use strong'; function f(arguments) {}", SyntaxError);
+ assertThrows("'use strong'; let f = (arguments) => {}", SyntaxError);
+})();
+
+function g() {}
+
+(function LexicalFunctionBindings(global) {
+ assertEquals('function', typeof g);
+ assertEquals(undefined, global.g);
+})(this);
+
+(function ImmutableFunctionBindings() {
+ function f() {}
+ assertThrows(function(){ eval("g = 0") }, TypeError);
+ assertThrows(function(){ eval("f = 0") }, TypeError);
+ assertEquals('function', typeof g);
+ assertEquals('function', typeof f);
+})();
diff --git a/deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js b/deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js
new file mode 100644
index 0000000000..726eed839c
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/mutually-recursive-funcs.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+"use strong";
+
+function foo(param, fooCount, barCount) {
+ if (param === 0)
+ return {'foo': fooCount, 'bar': barCount};
+ return bar(param - 1, fooCount + 1, barCount);
+}
+
+function bar(param, fooCount, barCount) {
+ if (param === 0)
+ return {'foo': fooCount, 'bar': barCount};
+ return foo(param - 1, fooCount, barCount + 1);
+}
+
+(function TestMutuallyRecursiveFunctions() {
+ let obj = foo(10, 0, 0);
+ assertEquals(obj.foo, 5);
+ assertEquals(obj.bar, 5);
+})();
diff --git a/deps/v8/test/mjsunit/strong/use-strong.js b/deps/v8/test/mjsunit/strong/use-strong.js
new file mode 100644
index 0000000000..bbda2662c2
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/use-strong.js
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+(function UseStrongScoping() {
+ assertThrows("'use strong'; 0 == 0", SyntaxError);
+ assertThrows("'use strong'; try {} catch(e) { { 0 == 0 } }", SyntaxError);
+ assertThrows("function f() { 'use strong'; 0 == 0 }", SyntaxError);
+ assertThrows("'use strong'; function f() { 0 == 0 }", SyntaxError);
+ assertThrows("'use strong'; function f() { function g() { 0 == 0 } }", SyntaxError);
+ assertThrows("'use strong'; eval('function f() { 0 == 0 }')", SyntaxError);
+ assertTrue(eval("function f() { 'use strong' } 0 == 0"));
+ assertTrue(eval("eval('\\\'use strong\\\''); 0 == 0"));
+})();
+
+(function UseStrongMixed() {
+ assertThrows("'use strict'; 'use strong'; 0 == 0", SyntaxError);
+ assertThrows("'use strong'; 'use strict'; 0 == 0", SyntaxError);
+ assertThrows("'use strong'; 'use strong'; 0 == 0", SyntaxError);
+ assertThrows("'use strict'; function f() { 'use strong'; 0 == 0 }", SyntaxError);
+ assertThrows("'use strong'; function f() { 'use strict'; 0 == 0 }", SyntaxError);
+ assertTrue(eval("'use strict'; function f() { 'use strong' } 0 == 0"));
+ assertTrue(eval("var x; function f() { 'use strong' } delete x"));
+ assertThrows("'use strict'; var x; function f() { 'use strong' } delete x", SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/strong/var-let-const.js b/deps/v8/test/mjsunit/strong/var-let-const.js
new file mode 100644
index 0000000000..5545ccfa58
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/var-let-const.js
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+(function NoVar() {
+ assertThrows("'use strong'; var x = 0;", SyntaxError);
+ assertThrows("'use strong'; for(var i = 0; i < 10; ++i) { };", SyntaxError);
+})();
+
+
+(function LetIsOkay() {
+ assertTrue(eval("'use strong'; let x = 0; x === 0;"));
+ assertTrue(eval("'use strong'; for(let i = 0; i < 10; ++i) { } 0 === 0;"));
+})();
+
+
+(function ConstIsOkay() {
+ assertTrue(eval("'use strong'; const x = 0; x === 0;"));
+ assertTrue(eval("'use strong'; for(const i = 0; false;) { } 0 === 0;"));
+})();
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index c960ce6b30..8389696f49 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -34,6 +34,7 @@ from testrunner.objects import testcase
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
+MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class MjsunitTestSuite(testsuite.TestSuite):
@@ -77,7 +78,12 @@ class MjsunitTestSuite(testsuite.TestSuite):
if SELF_SCRIPT_PATTERN.search(source):
env = ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")]
files = env + files
- files.append(os.path.join(self.root, "mjsunit.js"))
+
+ if not context.no_harness:
+ files.append(os.path.join(self.root, "mjsunit.js"))
+
+ if MODULE_PATTERN.search(source):
+ files.append("--module")
files.append(testfilename)
flags += files
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.default b/deps/v8/test/mjsunit/tools/tickprocessor-test.default
index c2fe441e19..943ec33553 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test.default
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.default
@@ -7,7 +7,6 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
[JavaScript]:
ticks total nonlib name
- 1 7.7% 11.1% LazyCompile: exp native math.js:41
[C++]:
ticks total nonlib name
@@ -18,7 +17,7 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
[Summary]:
ticks total nonlib name
- 1 7.7% 11.1% JavaScript
+ 0 0.0% 0.0% JavaScript
5 38.5% 55.6% C++
0 0.0% 0.0% GC
4 30.8% Shared libraries
@@ -45,6 +44,10 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
2 100.0% LazyCompile: exp native math.js:41
2 100.0% Script: exp.js
+ 2 15.4% UNKNOWN
+ 1 50.0% LazyCompile: exp native math.js:41
+ 1 100.0% Script: exp.js
+
1 7.7% v8::internal::JSObject::LookupOwnRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
1 100.0% Script: exp.js
@@ -57,6 +60,3 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
1 100.0% LazyCompile: exp native math.js:41
1 100.0% Script: exp.js
- 1 7.7% LazyCompile: exp native math.js:41
- 1 100.0% Script: exp.js
-
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown b/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown
index 263cec551e..1dbf71baa9 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown
@@ -7,7 +7,6 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
[JavaScript]:
ticks total nonlib name
- 1 9.1% 14.3% LazyCompile: exp native math.js:41
[C++]:
ticks total nonlib name
@@ -18,7 +17,7 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
[Summary]:
ticks total nonlib name
- 1 9.1% 14.3% JavaScript
+ 0 0.0% 0.0% JavaScript
5 45.5% 71.4% C++
0 0.0% 0.0% GC
4 36.4% Shared libraries
@@ -44,6 +43,10 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
2 100.0% LazyCompile: exp native math.js:41
2 100.0% Script: exp.js
+ 2 18.2% UNKNOWN
+ 1 50.0% LazyCompile: exp native math.js:41
+ 1 100.0% Script: exp.js
+
1 9.1% v8::internal::JSObject::LookupOwnRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
1 100.0% Script: exp.js
@@ -56,6 +59,3 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
1 100.0% LazyCompile: exp native math.js:41
1 100.0% Script: exp.js
- 1 9.1% LazyCompile: exp native math.js:41
- 1 100.0% Script: exp.js
-
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic b/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic
index aee1d1f792..52821e7b63 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic
@@ -9,7 +9,6 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
ticks total nonlib name
1 7.7% 11.1% LoadIC: j
1 7.7% 11.1% LoadIC: i
- 1 7.7% 11.1% LazyCompile: exp native math.js:41
[C++]:
ticks total nonlib name
@@ -20,7 +19,7 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
[Summary]:
ticks total nonlib name
- 3 23.1% 33.3% JavaScript
+ 2 15.4% 22.2% JavaScript
5 38.5% 55.6% C++
0 0.0% 0.0% GC
4 30.8% Shared libraries
@@ -47,6 +46,10 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
2 100.0% LazyCompile: exp native math.js:41
2 100.0% Script: exp.js
+ 2 15.4% UNKNOWN
+ 1 50.0% LazyCompile: exp native math.js:41
+ 1 100.0% Script: exp.js
+
1 7.7% v8::internal::JSObject::LookupOwnRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
1 100.0% Script: exp.js
@@ -63,6 +66,3 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
1 7.7% LoadIC: i
- 1 7.7% LazyCompile: exp native math.js:41
- 1 100.0% Script: exp.js
-
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index f460d349bb..b04b9a1765 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -323,7 +323,7 @@ CppEntriesProviderMock.prototype.parseVmSymbols = function(
function PrintMonitor(outputOrFileName) {
- var expectedOut = typeof outputOrFileName == 'string' ?
+ var expectedOut = this.expectedOut = typeof outputOrFileName == 'string' ?
this.loadExpectedOutput(outputOrFileName) : outputOrFileName;
var outputPos = 0;
var diffs = this.diffs = [];
@@ -359,7 +359,10 @@ PrintMonitor.prototype.loadExpectedOutput = function(fileName) {
PrintMonitor.prototype.finish = function() {
print = this.oldPrint;
if (this.diffs.length > 0 || this.unexpectedOut != null) {
+ print("===== actual output: =====");
print(this.realOut.join('\n'));
+ print("===== expected output: =====");
+ print(this.expectedOut.join('\n'));
assertEquals([], this.diffs);
assertNull(this.unexpectedOut);
}
@@ -383,7 +386,8 @@ function driveTickProcessorTest(
stateFilter,
undefined,
"0",
- "auto,auto");
+ "auto,auto",
+ false);
var pm = new PrintMonitor(testsPath + refOutput);
tp.processLogFileInTest(testsPath + logInput);
tp.printStatistics();
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 9ba07f7368..0d6baf0e22 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -56,7 +56,6 @@
# TODO(turbofan): These are all covered by mjsunit as well. Enable them once
# we pass 'mjsunit' and 'webkit' with TurboFan.
'js1_4/Functions/function-001': [PASS, NO_VARIANTS],
- 'js1_5/Regress/regress-396684': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-80981': [PASS, NO_VARIANTS],
# TODO(turbofan): Large switch statements crash.
@@ -390,7 +389,6 @@
'js1_5/LexicalConventions/regress-469940': [FAIL_OK],
'js1_5/Exceptions/regress-332472': [FAIL_OK],
'js1_5/Regress/regress-173067': [FAIL_OK],
- 'js1_5/Regress/regress-355556': [FAIL_OK],
'js1_5/Regress/regress-328664': [FAIL_OK],
'js1_5/Regress/regress-252892': [FAIL_OK],
'js1_5/Regress/regress-352208': [FAIL_OK],
@@ -872,9 +870,14 @@
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel or arch == mips64el'
+['arch == mipsel and simulator_run == True', {
+ # Crashes due to C stack overflow.
+ 'js1_5/extensions/regress-355497': [SKIP],
+}], # 'arch == mipsel and simulator_run == True'
+
['arch == mips64el and simulator_run == True', {
'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
-}],
+}], # 'arch == mips64el and simulator_run == True'
['arch == mips', {
@@ -895,6 +898,12 @@
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mips'
+['arch == arm and simulator_run == True', {
+
+ #BUG(3837): Crashes due to C stack overflow.
+ 'js1_5/extensions/regress-355497': [SKIP],
+}], # 'arch == arm and simulator_run == True'
+
['arch == arm64 and simulator_run == True', {
'js1_5/GC/regress-203278-2': [SKIP],
diff --git a/deps/v8/test/test262-es6/test262-es6.status b/deps/v8/test/test262-es6/test262-es6.status
index 8662159a73..fd93f295fa 100644
--- a/deps/v8/test/test262-es6/test262-es6.status
+++ b/deps/v8/test/test262-es6/test262-es6.status
@@ -79,16 +79,15 @@
######################## OBSOLETED BY ES6 ###########################
# ES6 allows duplicate properties
- # TODO(arv): Reactivate when check removal has relanded.
- # '11.1.5-4-4-a-1-s': [FAIL],
- # '11.1.5_4-4-b-1': [FAIL],
- # '11.1.5_4-4-b-2': [FAIL],
- # '11.1.5_4-4-c-1': [FAIL],
- # '11.1.5_4-4-c-2': [FAIL],
- # '11.1.5_4-4-d-1': [FAIL],
- # '11.1.5_4-4-d-2': [FAIL],
- # '11.1.5_4-4-d-3': [FAIL],
- # '11.1.5_4-4-d-4': [FAIL],
+ '11.1.5-4-4-a-1-s': [FAIL],
+ '11.1.5_4-4-b-1': [FAIL],
+ '11.1.5_4-4-b-2': [FAIL],
+ '11.1.5_4-4-c-1': [FAIL],
+ '11.1.5_4-4-c-2': [FAIL],
+ '11.1.5_4-4-d-1': [FAIL],
+ '11.1.5_4-4-d-2': [FAIL],
+ '11.1.5_4-4-d-3': [FAIL],
+ '11.1.5_4-4-d-4': [FAIL],
# ES6 does ToObject for Object.prototype.getOwnPropertyNames
'15.2.3.4-1': [FAIL],
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index d32f8f30a3..d1800c5fc5 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -51,6 +51,17 @@
'S12.1_A4_T2': [PASS, FAIL_OK],
'S12.6.4_A15': [PASS, FAIL_OK],
+ # ES6 allows duplicate properties
+ '11.1.5-4-4-a-1-s': [FAIL],
+ '11.1.5_4-4-b-1': [FAIL],
+ '11.1.5_4-4-b-2': [FAIL],
+ '11.1.5_4-4-c-1': [FAIL],
+ '11.1.5_4-4-c-2': [FAIL],
+ '11.1.5_4-4-d-1': [FAIL],
+ '11.1.5_4-4-d-2': [FAIL],
+ '11.1.5_4-4-d-3': [FAIL],
+ '11.1.5_4-4-d-4': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
diff --git a/deps/v8/test/unittests/base/cpu-unittest.cc b/deps/v8/test/unittests/base/cpu-unittest.cc
index 5c58f86238..c12e339701 100644
--- a/deps/v8/test/unittests/base/cpu-unittest.cc
+++ b/deps/v8/test/unittests/base/cpu-unittest.cc
@@ -18,6 +18,8 @@ TEST(CPUTest, FeatureImplications) {
EXPECT_TRUE(!cpu.has_ssse3() || cpu.has_sse3());
EXPECT_TRUE(!cpu.has_sse41() || cpu.has_sse3());
EXPECT_TRUE(!cpu.has_sse42() || cpu.has_sse41());
+ EXPECT_TRUE(!cpu.has_avx() || cpu.has_sse2());
+ EXPECT_TRUE(!cpu.has_fma3() || cpu.has_avx());
// arm features
EXPECT_TRUE(!cpu.has_vfp3_d32() || cpu.has_vfp3());
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
new file mode 100644
index 0000000000..918feb114b
--- /dev/null
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+TEST(LoggingTest, CheckEQImpl) {
+ EXPECT_EQ(nullptr, CheckEQImpl(0.0, 0.0, ""));
+ EXPECT_EQ(nullptr, CheckEQImpl(0.0, -0.0, ""));
+ EXPECT_EQ(nullptr, CheckEQImpl(-0.0, 0.0, ""));
+ EXPECT_EQ(nullptr, CheckEQImpl(-0.0, -0.0, ""));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index b17a9b9ef4..8392b55462 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -36,24 +36,6 @@ TEST(OS, GetCurrentProcessId) {
namespace {
-class SelfJoinThread FINAL : public Thread {
- public:
- SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
- void Run() FINAL { Join(); }
-};
-
-} // namespace
-
-
-TEST(Thread, DISABLE_ON_ANDROID(SelfJoin)) {
- SelfJoinThread thread;
- thread.Start();
- thread.Join();
-}
-
-
-namespace {
-
class ThreadLocalStorageTest : public Thread, public ::testing::Test {
public:
ThreadLocalStorageTest() : Thread(Options("ThreadLocalStorageTest")) {
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index fbdf87a2b2..1fa0b10842 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1479,6 +1479,85 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
// -----------------------------------------------------------------------------
+// Floating point comparisons.
+
+
+const Comparison kFPComparisons[] = {
+ {&RawMachineAssembler::Float64Equal, "Float64Equal", kEqual, kNotEqual},
+ {&RawMachineAssembler::Float64LessThan, "Float64LessThan",
+ kUnsignedLessThan, kUnsignedGreaterThanOrEqual},
+ {&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kUnsignedLessThanOrEqual, kUnsignedGreaterThan}};
+
+
+typedef InstructionSelectorTestWithParam<Comparison>
+ InstructionSelectorFPComparisonTest;
+
+
+TEST_P(InstructionSelectorFPComparisonTest, WithParameters) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachFloat64, kMachFloat64);
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorFPComparisonTest, NegatedWithParameters) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachFloat64, kMachFloat64);
+ m.Return(
+ m.WordBinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.negated_flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorFPComparisonTest, WithImmediateZeroOnRight) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, kMachFloat64);
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorFPComparisonTest,
+ ::testing::ValuesIn(kFPComparisons));
+
+
+TEST_F(InstructionSelectorTest, Float64EqualWithImmediateZeroOnLeft) {
+ StreamBuilder m(this, kMachInt32, kMachFloat64);
+ m.Return(m.Float64Equal(m.Float64Constant(0.0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+// -----------------------------------------------------------------------------
// Miscellaneous.
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index cd3ce090f7..571dbecd14 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -62,7 +62,7 @@ Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
// ARM64 logical instructions.
-static const MachInst2 kLogicalInstructions[] = {
+const MachInst2 kLogicalInstructions[] = {
{&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
{&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
{&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
@@ -74,7 +74,7 @@ static const MachInst2 kLogicalInstructions[] = {
// ARM64 logical immediates: contiguous set bits, rotated about a power of two
// sized block. The block is then duplicated across the word. Below is a random
// subset of the 32-bit immediates.
-static const uint32_t kLogical32Immediates[] = {
+const uint32_t kLogical32Immediates[] = {
0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
@@ -95,7 +95,7 @@ static const uint32_t kLogical32Immediates[] = {
// Random subset of 64-bit logical immediates.
-static const uint64_t kLogical64Immediates[] = {
+const uint64_t kLogical64Immediates[] = {
0x0000000000000001, 0x0000000000000002, 0x0000000000000003,
0x0000000000000070, 0x0000000000000080, 0x0000000000000100,
0x00000000000001c0, 0x0000000000000300, 0x0000000000000600,
@@ -131,7 +131,7 @@ std::ostream& operator<<(std::ostream& os, const AddSub& op) {
}
-static const AddSub kAddSubInstructions[] = {
+const AddSub kAddSubInstructions[] = {
{{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
kArm64Sub32},
{{&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
@@ -144,7 +144,7 @@ static const AddSub kAddSubInstructions[] = {
// ARM64 Add/Sub immediates: 12-bit immediate optionally shifted by 12.
// Below is a combination of a random subset and some edge values.
-static const int32_t kAddSubImmediates[] = {
+const int32_t kAddSubImmediates[] = {
0, 1, 69, 493, 599, 701, 719,
768, 818, 842, 945, 1246, 1286, 1429,
1669, 2171, 2179, 2182, 2254, 2334, 2338,
@@ -160,7 +160,7 @@ static const int32_t kAddSubImmediates[] = {
// ARM64 flag setting data processing instructions.
-static const MachInst2 kDPFlagSetInstructions[] = {
+const MachInst2 kDPFlagSetInstructions[] = {
{&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
{&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32},
@@ -168,7 +168,7 @@ static const MachInst2 kDPFlagSetInstructions[] = {
// ARM64 arithmetic with overflow instructions.
-static const MachInst2 kOvfAddSubInstructions[] = {
+const MachInst2 kOvfAddSubInstructions[] = {
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
kArm64Add32, kMachInt32},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
@@ -176,7 +176,7 @@ static const MachInst2 kOvfAddSubInstructions[] = {
// ARM64 shift instructions.
-static const Shift kShiftInstructions[] = {
+const Shift kShiftInstructions[] = {
{{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
kMode_Operand2_R_LSL_I},
{{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
@@ -196,7 +196,7 @@ static const Shift kShiftInstructions[] = {
// ARM64 Mul/Div instructions.
-static const MachInst2 kMulDivInstructions[] = {
+const MachInst2 kMulDivInstructions[] = {
{&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
{&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
{&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
@@ -206,7 +206,7 @@ static const MachInst2 kMulDivInstructions[] = {
// ARM64 FP arithmetic instructions.
-static const MachInst2 kFPArithInstructions[] = {
+const MachInst2 kFPArithInstructions[] = {
{&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
kMachFloat64},
{&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
@@ -229,16 +229,16 @@ std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
// ARM64 FP comparison instructions.
-static const FPCmp kFPCmpInstructions[] = {
+const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
kMachFloat64},
- kUnorderedEqual},
+ kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
kArm64Float64Cmp, kMachFloat64},
- kUnorderedLessThan},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
kArm64Float64Cmp, kMachFloat64},
- kUnorderedLessThanOrEqual}};
+ kUnsignedLessThanOrEqual}};
struct Conversion {
@@ -254,7 +254,7 @@ std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
// ARM64 type conversion instructions.
-static const Conversion kConversionInstructions[] = {
+const Conversion kConversionInstructions[] = {
{{&RawMachineAssembler::ChangeFloat32ToFloat64, "ChangeFloat32ToFloat64",
kArm64Float32ToFloat64, kMachFloat64},
kMachFloat32},
@@ -1518,10 +1518,39 @@ TEST_P(InstructionSelectorFPCmpTest, Parameter) {
}
+TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnRight) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, cmp.mi.machine_type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
::testing::ValuesIn(kFPCmpInstructions));
+TEST_F(InstructionSelectorTest, Float64EqualWithImmediateZeroOnLeft) {
+ StreamBuilder m(this, kMachInt32, kMachFloat64);
+ m.Return(m.Float64Equal(m.Float64Constant(0.0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
// -----------------------------------------------------------------------------
// Conversions.
diff --git a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
index 060b1c1842..5b31f5e04c 100644
--- a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
@@ -6,7 +6,7 @@
#include "src/compiler/change-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -66,10 +66,8 @@ class ChangeLoweringTest : public GraphTest {
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone(), WordRepresentation());
JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(graph(), common(), &javascript, &machine);
- CompilationInfo info(isolate(), zone());
- Linkage linkage(zone(), &info);
- ChangeLowering reducer(&jsgraph, &linkage);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine);
+ ChangeLowering reducer(&jsgraph);
return reducer.Reduce(node);
}
@@ -211,7 +209,7 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTaggedSmall) {
Node* val = Parameter(0);
Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- NodeProperties::SetBounds(val, Bounds(Type::None(), Type::SignedSmall()));
+ NodeProperties::SetBounds(val, Bounds(Type::None(), Type::Signed31()));
Reduction reduction = Reduce(node);
ASSERT_TRUE(reduction.Changed());
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index c713815466..1f6044b97c 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -5,6 +5,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/machine-type.h"
+#include "src/compiler/operator.h"
#include "test/unittests/compiler/graph-unittest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index d0ac14519f..6e60cfd12a 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -28,6 +28,7 @@ struct SharedOperator {
int value_input_count;
int effect_input_count;
int control_input_count;
+ int value_output_count;
int effect_output_count;
int control_output_count;
};
@@ -39,19 +40,21 @@ std::ostream& operator<<(std::ostream& os, const SharedOperator& fop) {
const SharedOperator kSharedOperators[] = {
-#define SHARED(Name, properties, value_input_count, effect_input_count, \
- control_input_count, effect_output_count, control_output_count) \
- { \
- &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties, \
- value_input_count, effect_input_count, control_input_count, \
- effect_output_count, control_output_count \
+#define SHARED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count) \
+ { \
+ &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties, \
+ value_input_count, effect_input_count, control_input_count, \
+ value_output_count, effect_output_count, control_output_count \
}
- SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
- SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
- SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
- SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
- SHARED(Throw, Operator::kFoldable, 1, 1, 1, 0, 1),
- SHARED(Return, Operator::kNoProperties, 1, 1, 1, 0, 1)
+ SHARED(Always, Operator::kPure, 0, 0, 0, 1, 0, 0),
+ SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 0, 1),
+ SHARED(End, Operator::kKontrol, 0, 0, 1, 0, 0, 0),
+ SHARED(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(Throw, Operator::kFoldable, 1, 1, 1, 0, 0, 1),
+ SHARED(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1)
#undef SHARED
};
@@ -83,7 +86,7 @@ TEST_P(CommonSharedOperatorTest, NumberOfInputsAndOutputs) {
sop.value_input_count + sop.effect_input_count + sop.control_input_count,
OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(sop.value_output_count, op->ValueOutputCount());
EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
EXPECT_EQ(sop.control_output_count, op->ControlOutputCount());
}
@@ -130,6 +133,9 @@ class CommonOperatorTest : public TestWithZone {
const int kArguments[] = {1, 5, 6, 42, 100, 10000, 65000};
+const size_t kCases[] = {3, 4, 100, 255, 1024, 65000};
+
+
const float kFloatValues[] = {-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::min(),
-1.0f,
@@ -154,6 +160,20 @@ const double kDoubleValues[] = {-std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::signaling_NaN()};
+const int32_t kInt32Values[] = {
+ std::numeric_limits<int32_t>::min(), -1914954528, -1698749618, -1578693386,
+ -1577976073, -1573998034, -1529085059, -1499540537, -1299205097,
+ -1090814845, -938186388, -806828902, -750927650, -520676892, -513661538,
+ -453036354, -433622833, -282638793, -28375, -27788, -22770, -18806, -14173,
+ -11956, -11200, -10212, -8160, -3751, -2758, -1522, -121, -120, -118, -117,
+ -106, -84, -80, -74, -59, -52, -48, -39, -35, -17, -11, -10, -9, -7, -5, 0,
+ 9, 12, 17, 23, 29, 31, 33, 35, 40, 47, 55, 56, 62, 64, 67, 68, 69, 74, 79,
+ 84, 89, 90, 97, 104, 118, 124, 126, 127, 7278, 17787, 24136, 24202, 25570,
+ 26680, 30242, 32399, 420886487, 642166225, 821912648, 822577803, 851385718,
+ 1212241078, 1411419304, 1589626102, 1596437184, 1876245816, 1954730266,
+ 2008792749, 2045320228, std::numeric_limits<int32_t>::max()};
+
+
const BranchHint kHints[] = {BranchHint::kNone, BranchHint::kTrue,
BranchHint::kFalse};
@@ -164,7 +184,7 @@ TEST_F(CommonOperatorTest, Branch) {
TRACED_FOREACH(BranchHint, hint, kHints) {
const Operator* const op = common()->Branch(hint);
EXPECT_EQ(IrOpcode::kBranch, op->opcode());
- EXPECT_EQ(Operator::kFoldable, op->properties());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
EXPECT_EQ(hint, BranchHintOf(op));
EXPECT_EQ(1, op->ValueInputCount());
EXPECT_EQ(0, op->EffectInputCount());
@@ -177,6 +197,39 @@ TEST_F(CommonOperatorTest, Branch) {
}
+TEST_F(CommonOperatorTest, Switch) {
+ TRACED_FOREACH(size_t, cases, kCases) {
+ const Operator* const op = common()->Switch(cases);
+ EXPECT_EQ(IrOpcode::kSwitch, op->opcode());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(static_cast<int>(cases), op->ControlOutputCount());
+ }
+}
+
+
+TEST_F(CommonOperatorTest, IfValue) {
+ TRACED_FOREACH(int32_t, value, kInt32Values) {
+ const Operator* const op = common()->IfValue(value);
+ EXPECT_EQ(IrOpcode::kIfValue, op->opcode());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
+ EXPECT_EQ(value, OpParameter<int32_t>(op));
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ControlOutputCount());
+ }
+}
+
+
TEST_F(CommonOperatorTest, Select) {
static const MachineType kTypes[] = {
kMachInt8, kMachUint8, kMachInt16, kMachUint16,
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index 56b4a2bf27..17716ab1a9 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -5,7 +5,7 @@
#include "src/bit-vector.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
new file mode 100644
index 0000000000..c083d4bab5
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -0,0 +1,70 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlFlowOptimizerTest : public GraphTest {
+ public:
+ explicit ControlFlowOptimizerTest(int num_parameters = 3)
+ : GraphTest(num_parameters), machine_(zone()) {}
+ ~ControlFlowOptimizerTest() OVERRIDE {}
+
+ protected:
+ void Optimize() {
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, machine());
+ ControlFlowOptimizer optimizer(&jsgraph, zone());
+ optimizer.Optimize();
+ }
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+};
+
+
+TEST_F(ControlFlowOptimizerTest, Switch) {
+ Node* index = Parameter(0);
+ Node* branch0 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(0)),
+ start());
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* branch1 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(1)),
+ if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* merge =
+ graph()->NewNode(common()->Merge(3), if_true0, if_true1, if_false1);
+ graph()->SetEnd(graph()->NewNode(common()->End(), merge));
+ Optimize();
+ Capture<Node*> switch_capture;
+ EXPECT_THAT(end(),
+ IsEnd(IsMerge(IsIfValue(0, CaptureEq(&switch_capture)),
+ IsIfValue(1, CaptureEq(&switch_capture)),
+ IsIfDefault(AllOf(CaptureEq(&switch_capture),
+ IsSwitch(index, start()))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/control-reducer-unittest.cc b/deps/v8/test/unittests/compiler/control-reducer-unittest.cc
new file mode 100644
index 0000000000..5e9b0ef4b1
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/control-reducer-unittest.cc
@@ -0,0 +1,124 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlReducerTest : public GraphTest {
+ protected:
+ void ReduceGraph() {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine);
+ ControlReducer::ReduceGraph(zone(), &jsgraph, common());
+ }
+};
+
+
+TEST_F(ControlReducerTest, NonTerminatingLoop) {
+ Node* loop = graph()->NewNode(common()->Loop(2), graph()->start());
+ loop->AppendInput(graph()->zone(), loop);
+ ReduceGraph();
+ Capture<Node*> branch;
+ EXPECT_THAT(
+ graph()->end(),
+ IsEnd(IsMerge(
+ graph()->start(),
+ IsReturn(IsUndefinedConstant(), graph()->start(),
+ IsIfFalse(
+ AllOf(CaptureEq(&branch),
+ IsBranch(IsAlways(),
+ AllOf(loop, IsLoop(graph()->start(),
+ IsIfTrue(CaptureEq(
+ &branch)))))))))));
+}
+
+
+TEST_F(ControlReducerTest, NonTerminatingLoopWithEffectPhi) {
+ Node* loop = graph()->NewNode(common()->Loop(2), graph()->start());
+ loop->AppendInput(graph()->zone(), loop);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), graph()->start());
+ ephi->AppendInput(graph()->zone(), ephi);
+ ephi->AppendInput(graph()->zone(), loop);
+ ReduceGraph();
+ Capture<Node*> branch;
+ EXPECT_THAT(
+ graph()->end(),
+ IsEnd(IsMerge(
+ graph()->start(),
+ IsReturn(IsUndefinedConstant(),
+ AllOf(ephi, IsEffectPhi(graph()->start(), ephi, loop)),
+ IsIfFalse(
+ AllOf(CaptureEq(&branch),
+ IsBranch(IsAlways(),
+ AllOf(loop, IsLoop(graph()->start(),
+ IsIfTrue(CaptureEq(
+ &branch)))))))))));
+}
+
+
+TEST_F(ControlReducerTest, NonTerminatingLoopWithTwoEffectPhis) {
+ Node* loop = graph()->NewNode(common()->Loop(2), graph()->start());
+ loop->AppendInput(graph()->zone(), loop);
+ Node* ephi1 = graph()->NewNode(common()->EffectPhi(2), graph()->start());
+ ephi1->AppendInput(graph()->zone(), ephi1);
+ ephi1->AppendInput(graph()->zone(), loop);
+ Node* ephi2 = graph()->NewNode(common()->EffectPhi(2), graph()->start());
+ ephi2->AppendInput(graph()->zone(), ephi2);
+ ephi2->AppendInput(graph()->zone(), loop);
+ ReduceGraph();
+ Capture<Node*> branch;
+ EXPECT_THAT(
+ graph()->end(),
+ IsEnd(IsMerge(
+ graph()->start(),
+ IsReturn(
+ IsUndefinedConstant(),
+ IsEffectSet(
+ AllOf(ephi1, IsEffectPhi(graph()->start(), ephi1, loop)),
+ AllOf(ephi2, IsEffectPhi(graph()->start(), ephi2, loop))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(
+ IsAlways(),
+ AllOf(loop, IsLoop(graph()->start(),
+ IsIfTrue(CaptureEq(&branch)))))))))));
+}
+
+
+TEST_F(ControlReducerTest, NonTerminatingLoopWithDeadEnd) {
+ Node* loop = graph()->NewNode(common()->Loop(2), graph()->start());
+ loop->AppendInput(graph()->zone(), loop);
+ graph()->end()->ReplaceInput(0, graph()->NewNode(common()->Dead()));
+ ReduceGraph();
+ Capture<Node*> branch;
+ EXPECT_THAT(
+ graph()->end(),
+ IsEnd(IsReturn(
+ IsUndefinedConstant(), graph()->start(),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsAlways(),
+ AllOf(loop, IsLoop(graph()->start(),
+ IsIfTrue(CaptureEq(&branch))))))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index dbdd4bb62f..9643d2399a 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -18,24 +19,188 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
struct TestOperator : public Operator {
TestOperator(Operator::Opcode opcode, Operator::Properties properties,
- size_t value_in, size_t value_out)
- : Operator(opcode, properties, "TestOp", value_in, 0, 0, value_out, 0,
- 0) {}
+ const char* op_name, size_t value_in, size_t value_out)
+ : Operator(opcode, properties, op_name, value_in, 0, 0, value_out, 0, 0) {
+ }
};
-namespace {
-
-TestOperator OP0(0, Operator::kNoWrite, 0, 1);
-TestOperator OP1(1, Operator::kNoProperties, 1, 1);
+const uint8_t kOpcodeA0 = 10;
+const uint8_t kOpcodeA1 = 11;
+const uint8_t kOpcodeA2 = 12;
+const uint8_t kOpcodeB0 = 20;
+const uint8_t kOpcodeB1 = 21;
+const uint8_t kOpcodeB2 = 22;
+const uint8_t kOpcodeC0 = 30;
+const uint8_t kOpcodeC1 = 31;
+const uint8_t kOpcodeC2 = 32;
+static TestOperator kOpA0(kOpcodeA0, Operator::kNoWrite, "opa1", 0, 1);
+static TestOperator kOpA1(kOpcodeA1, Operator::kNoProperties, "opa2", 1, 1);
+static TestOperator kOpA2(kOpcodeA2, Operator::kNoProperties, "opa3", 2, 1);
+static TestOperator kOpB0(kOpcodeB0, Operator::kNoWrite, "opa0", 0, 0);
+static TestOperator kOpB1(kOpcodeB1, Operator::kNoWrite, "opa1", 1, 0);
+static TestOperator kOpB2(kOpcodeB2, Operator::kNoWrite, "opa2", 2, 0);
+static TestOperator kOpC0(kOpcodeC0, Operator::kNoWrite, "opc0", 0, 0);
+static TestOperator kOpC1(kOpcodeC1, Operator::kNoWrite, "opc1", 1, 0);
+static TestOperator kOpC2(kOpcodeC2, Operator::kNoWrite, "opc2", 2, 0);
struct MockReducer : public Reducer {
MOCK_METHOD1(Reduce, Reduction(Node*));
};
+
+// Replaces all "A" operators with "B" operators without creating new nodes.
+class InPlaceABReducer : public Reducer {
+ public:
+ virtual Reduction Reduce(Node* node) {
+ switch (node->op()->opcode()) {
+ case kOpcodeA0:
+ EXPECT_EQ(0, node->InputCount());
+ node->set_op(&kOpB0);
+ return Replace(node);
+ case kOpcodeA1:
+ EXPECT_EQ(1, node->InputCount());
+ node->set_op(&kOpB1);
+ return Replace(node);
+ case kOpcodeA2:
+ EXPECT_EQ(2, node->InputCount());
+ node->set_op(&kOpB2);
+ return Replace(node);
+ }
+ return NoChange();
+ }
+};
+
+
+// Replaces all "A" operators with "B" operators by allocating new nodes.
+class NewABReducer : public Reducer {
+ public:
+ explicit NewABReducer(Graph* graph) : graph_(graph) {}
+ virtual Reduction Reduce(Node* node) {
+ switch (node->op()->opcode()) {
+ case kOpcodeA0:
+ EXPECT_EQ(0, node->InputCount());
+ return Replace(graph_->NewNode(&kOpB0));
+ case kOpcodeA1:
+ EXPECT_EQ(1, node->InputCount());
+ return Replace(graph_->NewNode(&kOpB1, node->InputAt(0)));
+ case kOpcodeA2:
+ EXPECT_EQ(2, node->InputCount());
+ return Replace(
+ graph_->NewNode(&kOpB2, node->InputAt(0), node->InputAt(1)));
+ }
+ return NoChange();
+ }
+ Graph* graph_;
+};
+
+
+// Wraps all "kOpA0" nodes in "kOpB1" operators by allocating new nodes.
+class A0Wrapper FINAL : public Reducer {
+ public:
+ explicit A0Wrapper(Graph* graph) : graph_(graph) {}
+ virtual Reduction Reduce(Node* node) OVERRIDE {
+ switch (node->op()->opcode()) {
+ case kOpcodeA0:
+ EXPECT_EQ(0, node->InputCount());
+ return Replace(graph_->NewNode(&kOpB1, node));
+ }
+ return NoChange();
+ }
+ Graph* graph_;
+};
+
+
+// Wraps all "kOpB0" nodes in two "kOpC1" operators by allocating new nodes.
+class B0Wrapper FINAL : public Reducer {
+ public:
+ explicit B0Wrapper(Graph* graph) : graph_(graph) {}
+ virtual Reduction Reduce(Node* node) OVERRIDE {
+ switch (node->op()->opcode()) {
+ case kOpcodeB0:
+ EXPECT_EQ(0, node->InputCount());
+ return Replace(graph_->NewNode(&kOpC1, graph_->NewNode(&kOpC1, node)));
+ }
+ return NoChange();
+ }
+ Graph* graph_;
+};
+
+
+// Replaces all "kOpA1" nodes with the first input.
+class A1Forwarder : public Reducer {
+ virtual Reduction Reduce(Node* node) {
+ switch (node->op()->opcode()) {
+ case kOpcodeA1:
+ EXPECT_EQ(1, node->InputCount());
+ return Replace(node->InputAt(0));
+ }
+ return NoChange();
+ }
+};
+
+
+// Replaces all "kOpB1" nodes with the first input.
+class B1Forwarder : public Reducer {
+ virtual Reduction Reduce(Node* node) {
+ switch (node->op()->opcode()) {
+ case kOpcodeB1:
+ EXPECT_EQ(1, node->InputCount());
+ return Replace(node->InputAt(0));
+ }
+ return NoChange();
+ }
+};
+
+
+// Replaces all "B" operators with "C" operators without creating new nodes.
+class InPlaceBCReducer : public Reducer {
+ public:
+ virtual Reduction Reduce(Node* node) {
+ switch (node->op()->opcode()) {
+ case kOpcodeB0:
+ EXPECT_EQ(0, node->InputCount());
+ node->set_op(&kOpC0);
+ return Replace(node);
+ case kOpcodeB1:
+ EXPECT_EQ(1, node->InputCount());
+ node->set_op(&kOpC1);
+ return Replace(node);
+ case kOpcodeB2:
+ EXPECT_EQ(2, node->InputCount());
+ node->set_op(&kOpC2);
+ return Replace(node);
+ }
+ return NoChange();
+ }
+};
+
+
+// Swaps the inputs to "kOp2A" and "kOp2B" nodes based on ids.
+class AB2Sorter : public Reducer {
+ virtual Reduction Reduce(Node* node) {
+ switch (node->op()->opcode()) {
+ case kOpcodeA2:
+ case kOpcodeB2:
+ EXPECT_EQ(2, node->InputCount());
+ Node* x = node->InputAt(0);
+ Node* y = node->InputAt(1);
+ if (x->id() > y->id()) {
+ node->ReplaceInput(0, y);
+ node->ReplaceInput(1, x);
+ return Replace(node);
+ }
+ }
+ return NoChange();
+ }
+};
+
+
} // namespace
@@ -75,6 +240,27 @@ class GraphReducerTest : public TestWithZone {
reducer.ReduceNode(node);
}
+ void ReduceGraph(Reducer* r1) {
+ GraphReducer reducer(graph(), zone());
+ reducer.AddReducer(r1);
+ reducer.ReduceGraph();
+ }
+
+ void ReduceGraph(Reducer* r1, Reducer* r2) {
+ GraphReducer reducer(graph(), zone());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.ReduceGraph();
+ }
+
+ void ReduceGraph(Reducer* r1, Reducer* r2, Reducer* r3) {
+ GraphReducer reducer(graph(), zone());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.AddReducer(r3);
+ reducer.ReduceGraph();
+ }
+
Graph* graph() { return &graph_; }
private:
@@ -84,9 +270,9 @@ class GraphReducerTest : public TestWithZone {
TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
StrictMock<MockReducer> r;
- Node* node0 = graph()->NewNode(&OP0);
- Node* node1 = graph()->NewNode(&OP1, node0);
- Node* node2 = graph()->NewNode(&OP1, node0);
+ Node* node0 = graph()->NewNode(&kOpA0);
+ Node* node1 = graph()->NewNode(&kOpA1, node0);
+ Node* node2 = graph()->NewNode(&kOpA1, node0);
EXPECT_CALL(r, Reduce(node0)).WillOnce(Return(Reducer::NoChange()));
EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
ReduceNode(node1, &r);
@@ -98,7 +284,7 @@ TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
StrictMock<MockReducer> r1, r2;
- Node* node0 = graph()->NewNode(&OP0);
+ Node* node0 = graph()->NewNode(&kOpA0);
EXPECT_CALL(r1, Reduce(node0));
EXPECT_CALL(r2, Reduce(node0));
ReduceNode(node0, &r1, &r2);
@@ -108,7 +294,7 @@ TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
Sequence s1, s2, s3;
StrictMock<MockReducer> r1, r2, r3;
- Node* node0 = graph()->NewNode(&OP0);
+ Node* node0 = graph()->NewNode(&kOpA0);
EXPECT_CALL(r1, Reduce(node0));
EXPECT_CALL(r2, Reduce(node0));
EXPECT_CALL(r3, Reduce(node0)).InSequence(s1, s2, s3).WillOnce(
@@ -118,6 +304,370 @@ TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
ReduceNode(node0, &r1, &r2, &r3);
}
+
+TEST_F(GraphReducerTest, ReduceGraphFromEnd1) {
+ StrictMock<MockReducer> r1;
+ Node* n = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n);
+ graph()->SetEnd(end);
+ Sequence s;
+ EXPECT_CALL(r1, Reduce(n));
+ EXPECT_CALL(r1, Reduce(end));
+ ReduceGraph(&r1);
+}
+
+
+TEST_F(GraphReducerTest, ReduceGraphFromEnd2) {
+ StrictMock<MockReducer> r1;
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+ Sequence s1, s2;
+ EXPECT_CALL(r1, Reduce(n1)).InSequence(s1, s2);
+ EXPECT_CALL(r1, Reduce(n2)).InSequence(s1);
+ EXPECT_CALL(r1, Reduce(n3)).InSequence(s2);
+ EXPECT_CALL(r1, Reduce(end)).InSequence(s1, s2);
+ ReduceGraph(&r1);
+}
+
+
+TEST_F(GraphReducerTest, ReduceInPlace1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n1);
+ graph()->SetEnd(end);
+
+ // Tests A* => B* with in-place updates.
+ InPlaceABReducer r;
+ for (int i = 0; i < 3; i++) {
+ int before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpB0, n1->op());
+ EXPECT_EQ(&kOpB1, end->op());
+ EXPECT_EQ(n1, end->InputAt(0));
+ }
+}
+
+
+TEST_F(GraphReducerTest, ReduceInPlace2) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ // Tests A* => B* with in-place updates.
+ InPlaceABReducer r;
+ for (int i = 0; i < 3; i++) {
+ int before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpB0, n1->op());
+ EXPECT_EQ(&kOpB1, n2->op());
+ EXPECT_EQ(n1, n2->InputAt(0));
+ EXPECT_EQ(&kOpB1, n3->op());
+ EXPECT_EQ(n1, n3->InputAt(0));
+ EXPECT_EQ(&kOpB2, end->op());
+ EXPECT_EQ(n2, end->InputAt(0));
+ EXPECT_EQ(n3, end->InputAt(1));
+ }
+}
+
+
+TEST_F(GraphReducerTest, ReduceNew1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ NewABReducer r(graph());
+ // Tests A* => B* while creating new nodes.
+ for (int i = 0; i < 3; i++) {
+ int before = graph()->NodeCount();
+ ReduceGraph(&r);
+ if (i == 0) {
+ EXPECT_NE(before, graph()->NodeCount());
+ } else {
+ EXPECT_EQ(before, graph()->NodeCount());
+ }
+ Node* nend = graph()->end();
+ EXPECT_NE(end, nend); // end() should be updated too.
+
+ Node* nn2 = nend->InputAt(0);
+ Node* nn3 = nend->InputAt(1);
+ Node* nn1 = nn2->InputAt(0);
+
+ EXPECT_EQ(nn1, nn3->InputAt(0));
+
+ EXPECT_EQ(&kOpB0, nn1->op());
+ EXPECT_EQ(&kOpB1, nn2->op());
+ EXPECT_EQ(&kOpB1, nn3->op());
+ EXPECT_EQ(&kOpB2, nend->op());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Wrapping1) {
+ Node* end = graph()->NewNode(&kOpA0);
+ graph()->SetEnd(end);
+ EXPECT_EQ(1, graph()->NodeCount());
+
+ A0Wrapper r(graph());
+
+ ReduceGraph(&r);
+ EXPECT_EQ(2, graph()->NodeCount());
+
+ Node* nend = graph()->end();
+ EXPECT_NE(end, nend);
+ EXPECT_EQ(&kOpB1, nend->op());
+ EXPECT_EQ(1, nend->InputCount());
+ EXPECT_EQ(end, nend->InputAt(0));
+}
+
+
+TEST_F(GraphReducerTest, Wrapping2) {
+ Node* end = graph()->NewNode(&kOpB0);
+ graph()->SetEnd(end);
+ EXPECT_EQ(1, graph()->NodeCount());
+
+ B0Wrapper r(graph());
+
+ ReduceGraph(&r);
+ EXPECT_EQ(3, graph()->NodeCount());
+
+ Node* nend = graph()->end();
+ EXPECT_NE(end, nend);
+ EXPECT_EQ(&kOpC1, nend->op());
+ EXPECT_EQ(1, nend->InputCount());
+
+ Node* n1 = nend->InputAt(0);
+ EXPECT_NE(end, n1);
+ EXPECT_EQ(&kOpC1, n1->op());
+ EXPECT_EQ(1, n1->InputCount());
+ EXPECT_EQ(end, n1->InputAt(0));
+}
+
+
+TEST_F(GraphReducerTest, Forwarding1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n1);
+ graph()->SetEnd(end);
+
+ A1Forwarder r;
+
+ // Tests A1(x) => x
+ for (int i = 0; i < 3; i++) {
+ int before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(n1, graph()->end());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Forwarding2) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ A1Forwarder r;
+
+ // Tests reducing A2(A1(x), A1(y)) => A2(x, y).
+ for (int i = 0; i < 3; i++) {
+ int before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(n1, end->InputAt(0));
+ EXPECT_EQ(n1, end->InputAt(1));
+ EXPECT_EQ(&kOpA2, end->op());
+ EXPECT_EQ(0, n2->UseCount());
+ EXPECT_EQ(0, n3->UseCount());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Forwarding3) {
+ // Tests reducing a chain of A1(A1(A1(A1(x)))) => x.
+ for (int i = 0; i < 8; i++) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = n1;
+ for (int j = 0; j < i; j++) {
+ end = graph()->NewNode(&kOpA1, end);
+ }
+ graph()->SetEnd(end);
+
+ A1Forwarder r;
+
+ for (int i = 0; i < 3; i++) {
+ int before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(n1, graph()->end());
+ }
+ }
+}
+
+
+TEST_F(GraphReducerTest, ReduceForward1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ InPlaceABReducer r;
+ B1Forwarder f;
+
+ // Tests first reducing A => B, then B1(x) => x.
+ for (int i = 0; i < 3; i++) {
+ int before = graph()->NodeCount();
+ ReduceGraph(&r, &f);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpB0, n1->op());
+ EXPECT_TRUE(n2->IsDead());
+ EXPECT_EQ(n1, end->InputAt(0));
+ EXPECT_TRUE(n3->IsDead());
+ EXPECT_EQ(n1, end->InputAt(0));
+ EXPECT_EQ(&kOpB2, end->op());
+ EXPECT_EQ(0, n2->UseCount());
+ EXPECT_EQ(0, n3->UseCount());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Sorter1) {
+ AB2Sorter r;
+ for (int i = 0; i < 6; i++) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = NULL; // Initialize to please the compiler.
+
+ if (i == 0) end = graph()->NewNode(&kOpA2, n2, n3);
+ if (i == 1) end = graph()->NewNode(&kOpA2, n3, n2);
+ if (i == 2) end = graph()->NewNode(&kOpA2, n2, n1);
+ if (i == 3) end = graph()->NewNode(&kOpA2, n1, n2);
+ if (i == 4) end = graph()->NewNode(&kOpA2, n3, n1);
+ if (i == 5) end = graph()->NewNode(&kOpA2, n1, n3);
+
+ graph()->SetEnd(end);
+
+ int before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(&kOpA1, n2->op());
+ EXPECT_EQ(&kOpA1, n3->op());
+ EXPECT_EQ(&kOpA2, end->op());
+ EXPECT_EQ(end, graph()->end());
+ EXPECT_LE(end->InputAt(0)->id(), end->InputAt(1)->id());
+ }
+}
+
+
+// Generate a node graph with the given permutations.
+void GenDAG(Graph* graph, int* p3, int* p2, int* p1) {
+ Node* level4 = graph->NewNode(&kOpA0);
+ Node* level3[] = {graph->NewNode(&kOpA1, level4),
+ graph->NewNode(&kOpA1, level4)};
+
+ Node* level2[] = {graph->NewNode(&kOpA1, level3[p3[0]]),
+ graph->NewNode(&kOpA1, level3[p3[1]]),
+ graph->NewNode(&kOpA1, level3[p3[0]]),
+ graph->NewNode(&kOpA1, level3[p3[1]])};
+
+ Node* level1[] = {graph->NewNode(&kOpA2, level2[p2[0]], level2[p2[1]]),
+ graph->NewNode(&kOpA2, level2[p2[2]], level2[p2[3]])};
+
+ Node* end = graph->NewNode(&kOpA2, level1[p1[0]], level1[p1[1]]);
+ graph->SetEnd(end);
+}
+
+
+TEST_F(GraphReducerTest, SortForwardReduce) {
+ // Tests combined reductions on a series of DAGs.
+ for (int j = 0; j < 2; j++) {
+ int p3[] = {j, 1 - j};
+ for (int m = 0; m < 2; m++) {
+ int p1[] = {m, 1 - m};
+ for (int k = 0; k < 24; k++) { // All permutations of 0, 1, 2, 3
+ int p2[] = {-1, -1, -1, -1};
+ int n = k;
+ for (int d = 4; d >= 1; d--) { // Construct permutation.
+ int p = n % d;
+ for (int z = 0; z < 4; z++) {
+ if (p2[z] == -1) {
+ if (p == 0) p2[z] = d - 1;
+ p--;
+ }
+ }
+ n = n / d;
+ }
+
+ GenDAG(graph(), p3, p2, p1);
+
+ AB2Sorter r1;
+ A1Forwarder r2;
+ InPlaceABReducer r3;
+
+ ReduceGraph(&r1, &r2, &r3);
+
+ Node* end = graph()->end();
+ EXPECT_EQ(&kOpB2, end->op());
+ Node* n1 = end->InputAt(0);
+ Node* n2 = end->InputAt(1);
+ EXPECT_NE(n1, n2);
+ EXPECT_LT(n1->id(), n2->id());
+ EXPECT_EQ(&kOpB2, n1->op());
+ EXPECT_EQ(&kOpB2, n2->op());
+ Node* n4 = n1->InputAt(0);
+ EXPECT_EQ(&kOpB0, n4->op());
+ EXPECT_EQ(n4, n1->InputAt(1));
+ EXPECT_EQ(n4, n2->InputAt(0));
+ EXPECT_EQ(n4, n2->InputAt(1));
+ }
+ }
+ }
+}
+
+
+TEST_F(GraphReducerTest, Order) {
+ // Test that the order of reducers doesn't matter, as they should be
+ // rerun for changed nodes.
+ for (int i = 0; i < 2; i++) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n1);
+ graph()->SetEnd(end);
+
+ InPlaceABReducer abr;
+ InPlaceBCReducer bcr;
+
+ // Tests A* => C* with in-place updates.
+ for (int j = 0; j < 3; j++) {
+ int before = graph()->NodeCount();
+ if (i == 0) {
+ ReduceGraph(&abr, &bcr);
+ } else {
+ ReduceGraph(&bcr, &abr);
+ }
+
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpC0, n1->op());
+ EXPECT_EQ(&kOpC1, end->op());
+ EXPECT_EQ(n1, end->InputAt(0));
+ }
+ }
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 95432587a8..9da3950e54 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/compiler/graph-unittest.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "test/unittests/compiler/node-test-utils.h"
namespace v8 {
@@ -13,6 +13,7 @@ namespace compiler {
GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
+ graph()->SetEnd(graph()->NewNode(common()->End(), graph()->start()));
}
@@ -92,8 +93,15 @@ Matcher<Node*> GraphTest::IsTrueConstant() {
}
+Matcher<Node*> GraphTest::IsUndefinedConstant() {
+ return IsHeapConstant(
+ Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+}
+
+
TypedGraphTest::TypedGraphTest(int num_parameters)
- : GraphTest(num_parameters), typer_(graph(), MaybeHandle<Context>()) {}
+ : GraphTest(num_parameters),
+ typer_(isolate(), graph(), MaybeHandle<Context>()) {}
TypedGraphTest::~TypedGraphTest() {}
@@ -105,6 +113,26 @@ Node* TypedGraphTest::Parameter(Type* type, int32_t index) {
return node;
}
+
+namespace {
+
+const Operator kDummyOperator(0, Operator::kNoProperties, "Dummy", 0, 0, 0, 1,
+ 0, 0);
+
+} // namespace
+
+
+TEST_F(GraphTest, NewNode) {
+ Node* n0 = graph()->NewNode(&kDummyOperator);
+ Node* n1 = graph()->NewNode(&kDummyOperator);
+ EXPECT_NE(n0, n1);
+ EXPECT_LT(0, n0->id());
+ EXPECT_LT(0, n1->id());
+ EXPECT_NE(n0->id(), n1->id());
+ EXPECT_EQ(&kDummyOperator, n0->op());
+ EXPECT_EQ(&kDummyOperator, n1->op());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 7c75161d9d..21f8ebf6b4 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -26,12 +26,15 @@ namespace compiler {
using ::testing::Matcher;
-class GraphTest : public TestWithContext, public TestWithZone {
+class GraphTest : public TestWithContext, public TestWithIsolateAndZone {
public:
explicit GraphTest(int num_parameters = 1);
~GraphTest() OVERRIDE;
protected:
+ Node* start() { return graph()->start(); }
+ Node* end() { return graph()->end(); }
+
Node* Parameter(int32_t index = 0);
Node* Float32Constant(volatile float value);
Node* Float64Constant(volatile double value);
@@ -49,6 +52,7 @@ class GraphTest : public TestWithContext, public TestWithZone {
Matcher<Node*> IsFalseConstant();
Matcher<Node*> IsTrueConstant();
+ Matcher<Node*> IsUndefinedConstant();
CommonOperatorBuilder* common() { return &common_; }
Graph* graph() { return &graph_; }
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index c79a9e4eaa..d3e00c642c 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -34,14 +34,15 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
out << "=== Schedule before instruction selection ===" << std::endl
<< *schedule;
}
- EXPECT_NE(0, graph()->NodeCount());
- int initial_node_count = graph()->NodeCount();
- Linkage linkage(test_->zone(), call_descriptor());
+ size_t const node_count = graph()->NodeCount();
+ EXPECT_NE(0u, node_count);
+ Linkage linkage(call_descriptor());
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(test_->zone(), schedule);
- InstructionSequence sequence(test_->zone(), instruction_blocks);
+ InstructionSequence sequence(test_->isolate(), test_->zone(),
+ instruction_blocks);
SourcePositionTable source_position_table(graph());
- InstructionSelector selector(test_->zone(), graph(), &linkage, &sequence,
+ InstructionSelector selector(test_->zone(), node_count, &linkage, &sequence,
schedule, &source_position_table, features);
selector.SelectInstructions();
if (FLAG_trace_turbo) {
@@ -52,19 +53,9 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
<< printable;
}
Stream s;
+ s.virtual_registers_ = selector.GetVirtualRegistersForTesting();
// Map virtual registers.
- {
- const NodeToVregMap& node_map = selector.GetNodeMapForTesting();
- for (int i = 0; i < initial_node_count; ++i) {
- if (node_map[i] != InstructionSelector::kNodeUnmapped) {
- s.virtual_registers_.insert(std::make_pair(i, node_map[i]));
- }
- }
- }
- std::set<int> virtual_registers;
- for (InstructionSequence::const_iterator i = sequence.begin();
- i != sequence.end(); ++i) {
- Instruction* instr = *i;
+ for (Instruction* const instr : sequence) {
if (instr->opcode() < 0) continue;
if (mode == kTargetInstructions) {
switch (instr->arch_opcode()) {
@@ -86,10 +77,6 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
if (output->IsConstant()) {
s.constants_.insert(std::make_pair(
output->index(), sequence.GetConstant(output->index())));
- virtual_registers.insert(output->index());
- } else if (output->IsUnallocated()) {
- virtual_registers.insert(
- UnallocatedOperand::cast(output)->virtual_register());
}
}
for (size_t i = 0; i < instr->InputCount(); ++i) {
@@ -98,16 +85,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
if (input->IsImmediate()) {
s.immediates_.insert(std::make_pair(
input->index(), sequence.GetImmediate(input->index())));
- } else if (input->IsUnallocated()) {
- virtual_registers.insert(
- UnallocatedOperand::cast(input)->virtual_register());
}
}
s.instructions_.push_back(instr);
}
- for (std::set<int>::const_iterator i = virtual_registers.begin();
- i != virtual_registers.end(); ++i) {
- int virtual_register = *i;
+ for (auto i : s.virtual_registers_) {
+ int const virtual_register = i.second;
if (sequence.IsDouble(virtual_register)) {
EXPECT_FALSE(sequence.IsReference(virtual_register));
s.doubles_.insert(virtual_register);
@@ -167,7 +150,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
StreamBuilder m(this, kMachFloat32);
m.Return(m.Float32Constant(kValue));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
EXPECT_FLOAT_EQ(kValue, s.ToFloat32(s[0]->OutputAt(0)));
@@ -180,7 +163,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
StreamBuilder m(this, kMachInt32, kMachInt32);
m.Return(m.Parameter(0));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kArchRet, s[1]->arch_opcode());
@@ -192,7 +175,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
StreamBuilder m(this, kMachInt32);
m.Return(m.Int32Constant(0));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
@@ -210,7 +193,7 @@ TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
StreamBuilder m(this, kMachInt32, kMachFloat64);
m.Return(m.TruncateFloat64ToInt32(m.Parameter(0)));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(3U, s.size());
+ ASSERT_EQ(4U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
EXPECT_EQ(kArchTruncateDoubleToI, s[1]->arch_opcode());
EXPECT_EQ(1U, s[1]->InputCount());
@@ -251,7 +234,7 @@ TARGET_TEST_F(InstructionSelectorTest, Finish) {
Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
m.Return(finish);
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(3U, s.size());
+ ASSERT_EQ(4U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
ASSERT_TRUE(s[0]->Output()->IsUnallocated());
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
index e65d68bcca..983e5c0aed 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -18,7 +18,8 @@ namespace v8 {
namespace internal {
namespace compiler {
-class InstructionSelectorTest : public TestWithContext, public TestWithZone {
+class InstructionSelectorTest : public TestWithContext,
+ public TestWithIsolateAndZone {
public:
InstructionSelectorTest();
~InstructionSelectorTest() OVERRIDE;
@@ -36,19 +37,20 @@ class InstructionSelectorTest : public TestWithContext, public TestWithZone {
class StreamBuilder FINAL : public RawMachineAssembler {
public:
StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
- : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
+ : RawMachineAssembler(test->isolate(),
+ new (test->zone()) Graph(test->zone()),
MakeMachineSignature(test->zone(), return_type)),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type)
: RawMachineAssembler(
- new (test->zone()) Graph(test->zone()),
+ test->isolate(), new (test->zone()) Graph(test->zone()),
MakeMachineSignature(test->zone(), return_type, parameter0_type)),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type, MachineType parameter1_type)
: RawMachineAssembler(
- new (test->zone()) Graph(test->zone()),
+ test->isolate(), new (test->zone()) Graph(test->zone()),
MakeMachineSignature(test->zone(), return_type, parameter0_type,
parameter1_type)),
test_(test) {}
@@ -56,7 +58,7 @@ class InstructionSelectorTest : public TestWithContext, public TestWithZone {
MachineType parameter0_type, MachineType parameter1_type,
MachineType parameter2_type)
: RawMachineAssembler(
- new (test->zone()) Graph(test->zone()),
+ test->isolate(), new (test->zone()) Graph(test->zone()),
MakeMachineSignature(test->zone(), return_type, parameter0_type,
parameter1_type, parameter2_type)),
test_(test) {}
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index 9546376ee8..001fb11d13 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -69,7 +69,8 @@ RegisterConfiguration* InstructionSequenceTest::config() {
InstructionSequence* InstructionSequenceTest::sequence() {
if (sequence_ == nullptr) {
- sequence_ = new (zone()) InstructionSequence(zone(), &instruction_blocks_);
+ sequence_ = new (zone())
+ InstructionSequence(isolate(), zone(), &instruction_blocks_);
}
return sequence_;
}
@@ -137,7 +138,7 @@ InstructionSequenceTest::TestOperand InstructionSequenceTest::Imm(int32_t imm) {
InstructionSequenceTest::VReg InstructionSequenceTest::Define(
TestOperand output_op) {
VReg vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(vreg, output_op)};
+ InstructionOperand outputs[1]{ConvertOutputOp(vreg, output_op)};
Emit(vreg.value_, kArchNop, 1, outputs);
return vreg;
}
@@ -145,7 +146,7 @@ InstructionSequenceTest::VReg InstructionSequenceTest::Define(
int InstructionSequenceTest::Return(TestOperand input_op_0) {
block_returns_ = true;
- InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
+ InstructionOperand inputs[1]{ConvertInputOp(input_op_0)};
return Emit(NewIndex(), kArchRet, 0, nullptr, 1, inputs);
}
@@ -154,20 +155,35 @@ PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
VReg incoming_vreg_1,
VReg incoming_vreg_2,
VReg incoming_vreg_3) {
- auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, 10);
VReg inputs[] = {incoming_vreg_0, incoming_vreg_1, incoming_vreg_2,
incoming_vreg_3};
- for (size_t i = 0; i < arraysize(inputs); ++i) {
- if (inputs[i].value_ == kNoValue) break;
- Extend(phi, inputs[i]);
+ size_t input_count = 0;
+ for (; input_count < arraysize(inputs); ++input_count) {
+ if (inputs[input_count].value_ == kNoValue) break;
}
+ CHECK(input_count > 0);
+ auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, input_count);
+ for (size_t i = 0; i < input_count; ++i) {
+ SetInput(phi, i, inputs[i]);
+ }
+ current_block_->AddPhi(phi);
+ return phi;
+}
+
+
+PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
+ size_t input_count) {
+ auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, input_count);
+ SetInput(phi, 0, incoming_vreg_0);
current_block_->AddPhi(phi);
return phi;
}
-void InstructionSequenceTest::Extend(PhiInstruction* phi, VReg vreg) {
- phi->Extend(zone(), vreg.value_);
+void InstructionSequenceTest::SetInput(PhiInstruction* phi, size_t input,
+ VReg vreg) {
+ CHECK(vreg.value_ != kNoValue);
+ phi->SetInput(input, vreg.value_);
}
@@ -175,7 +191,7 @@ InstructionSequenceTest::VReg InstructionSequenceTest::DefineConstant(
int32_t imm) {
VReg vreg = NewReg();
sequence()->AddConstant(vreg.value_, Constant(imm));
- InstructionOperand* outputs[1]{ConstantOperand::Create(vreg.value_, zone())};
+ InstructionOperand outputs[1]{ConstantOperand(vreg.value_)};
Emit(vreg.value_, kArchNop, 1, outputs);
return vreg;
}
@@ -195,7 +211,7 @@ static size_t CountInputs(size_t size,
int InstructionSequenceTest::EmitI(size_t input_size, TestOperand* inputs) {
- InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
return Emit(NewIndex(), kArchNop, 0, nullptr, input_size, mapped_inputs);
}
@@ -212,8 +228,8 @@ int InstructionSequenceTest::EmitI(TestOperand input_op_0,
InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+ InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
Emit(output_vreg.value_, kArchNop, 1, outputs, input_size, mapped_inputs);
return output_vreg;
}
@@ -230,9 +246,9 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- CHECK(UnallocatedOperand::cast(outputs[0])->HasFixedPolicy());
- InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+ InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+ CHECK(UnallocatedOperand::cast(outputs[0]).HasFixedPolicy());
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
Emit(output_vreg.value_, kArchCallCodeObject, 1, outputs, input_size,
mapped_inputs, 0, nullptr, true);
return output_vreg;
@@ -256,8 +272,8 @@ const Instruction* InstructionSequenceTest::GetInstruction(
int InstructionSequenceTest::EmitBranch(TestOperand input_op) {
- InstructionOperand* inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
- ConvertInputOp(Imm()), ConvertInputOp(Imm())};
+ InstructionOperand inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
+ ConvertInputOp(Imm()), ConvertInputOp(Imm())};
InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
FlagsConditionField::encode(kEqual);
auto instruction =
@@ -273,7 +289,7 @@ int InstructionSequenceTest::EmitFallThrough() {
int InstructionSequenceTest::EmitJump() {
- InstructionOperand* inputs[1]{ConvertInputOp(Imm())};
+ InstructionOperand inputs[1]{ConvertInputOp(Imm())};
auto instruction =
NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
return AddInstruction(NewIndex(), instruction);
@@ -281,52 +297,44 @@ int InstructionSequenceTest::EmitJump() {
Instruction* InstructionSequenceTest::NewInstruction(
- InstructionCode code, size_t outputs_size, InstructionOperand** outputs,
- size_t inputs_size, InstructionOperand** inputs, size_t temps_size,
- InstructionOperand** temps) {
- CHECK_NE(nullptr, current_block_);
+ InstructionCode code, size_t outputs_size, InstructionOperand* outputs,
+ size_t inputs_size, InstructionOperand* inputs, size_t temps_size,
+ InstructionOperand* temps) {
+ CHECK(current_block_);
return Instruction::New(zone(), code, outputs_size, outputs, inputs_size,
inputs, temps_size, temps);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy) {
- auto unallocated = new (zone()) UnallocatedOperand(policy);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, op.vreg_.value_);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy,
UnallocatedOperand::Lifetime lifetime) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, lifetime);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, lifetime, op.vreg_.value_);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy, int index) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, index);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, index, op.vreg_.value_);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::BasicPolicy policy, int index) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, index);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, index, op.vreg_.value_);
}
-InstructionOperand** InstructionSequenceTest::ConvertInputs(
+InstructionOperand* InstructionSequenceTest::ConvertInputs(
size_t input_size, TestOperand* inputs) {
- InstructionOperand** mapped_inputs =
- zone()->NewArray<InstructionOperand*>(static_cast<int>(input_size));
+ InstructionOperand* mapped_inputs =
+ zone()->NewArray<InstructionOperand>(static_cast<int>(input_size));
for (size_t i = 0; i < input_size; ++i) {
mapped_inputs[i] = ConvertInputOp(inputs[i]);
}
@@ -334,10 +342,10 @@ InstructionOperand** InstructionSequenceTest::ConvertInputs(
}
-InstructionOperand* InstructionSequenceTest::ConvertInputOp(TestOperand op) {
+InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
if (op.type_ == kImmediate) {
CHECK_EQ(op.vreg_.value_, kNoValue);
- return ImmediateOperand::Create(op.value_, zone());
+ return ImmediateOperand(op.value_);
}
CHECK_NE(op.vreg_.value_, kNoValue);
switch (op.type_) {
@@ -360,12 +368,12 @@ InstructionOperand* InstructionSequenceTest::ConvertInputOp(TestOperand op) {
break;
}
CHECK(false);
- return NULL;
+ return InstructionOperand();
}
-InstructionOperand* InstructionSequenceTest::ConvertOutputOp(VReg vreg,
- TestOperand op) {
+InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
+ TestOperand op) {
CHECK_EQ(op.vreg_.value_, kNoValue);
op.vreg_ = vreg;
switch (op.type_) {
@@ -382,7 +390,7 @@ InstructionOperand* InstructionSequenceTest::ConvertOutputOp(VReg vreg,
break;
}
CHECK(false);
- return NULL;
+ return InstructionOperand();
}
@@ -418,7 +426,7 @@ InstructionBlock* InstructionSequenceTest::NewBlock() {
void InstructionSequenceTest::WireBlocks() {
- CHECK_EQ(nullptr, current_block());
+ CHECK(!current_block());
CHECK(instruction_blocks_.size() == completions_.size());
size_t offset = 0;
for (const auto& completion : completions_) {
@@ -452,11 +460,10 @@ void InstructionSequenceTest::WireBlock(size_t block_offset, int jump_offset) {
int InstructionSequenceTest::Emit(int instruction_index, InstructionCode code,
size_t outputs_size,
- InstructionOperand** outputs,
+ InstructionOperand* outputs,
size_t inputs_size,
- InstructionOperand** inputs,
- size_t temps_size, InstructionOperand** temps,
- bool is_call) {
+ InstructionOperand* inputs, size_t temps_size,
+ InstructionOperand* temps, bool is_call) {
auto instruction = NewInstruction(code, outputs_size, outputs, inputs_size,
inputs, temps_size, temps);
if (is_call) instruction->MarkAsCall();
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
index ce0a5b460d..613e25883e 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-class InstructionSequenceTest : public TestWithZone {
+class InstructionSequenceTest : public TestWithIsolateAndZone {
public:
static const int kDefaultNRegs = 4;
static const int kNoValue = kMinInt;
@@ -138,7 +138,8 @@ class InstructionSequenceTest : public TestWithZone {
VReg incoming_vreg_1 = VReg(),
VReg incoming_vreg_2 = VReg(),
VReg incoming_vreg_3 = VReg());
- void Extend(PhiInstruction* phi, VReg vreg);
+ PhiInstruction* Phi(VReg incoming_vreg_0, size_t input_count);
+ void SetInput(PhiInstruction* phi, size_t input, VReg vreg);
VReg DefineConstant(int32_t imm = 0);
int EmitNop();
@@ -179,32 +180,32 @@ class InstructionSequenceTest : public TestWithZone {
int EmitFallThrough();
int EmitJump();
Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
- InstructionOperand** outputs,
+ InstructionOperand* outputs,
size_t inputs_size = 0,
- InstructionOperand* *inputs = nullptr,
+ InstructionOperand* inputs = nullptr,
size_t temps_size = 0,
- InstructionOperand* *temps = nullptr);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy,
- UnallocatedOperand::Lifetime lifetime);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy,
- int index);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::BasicPolicy policy,
- int index);
- InstructionOperand** ConvertInputs(size_t input_size, TestOperand* inputs);
- InstructionOperand* ConvertInputOp(TestOperand op);
- InstructionOperand* ConvertOutputOp(VReg vreg, TestOperand op);
+ InstructionOperand* temps = nullptr);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy,
+ UnallocatedOperand::Lifetime lifetime);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy,
+ int index);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::BasicPolicy policy,
+ int index);
+ InstructionOperand* ConvertInputs(size_t input_size, TestOperand* inputs);
+ InstructionOperand ConvertInputOp(TestOperand op);
+ InstructionOperand ConvertOutputOp(VReg vreg, TestOperand op);
InstructionBlock* NewBlock();
void WireBlock(size_t block_offset, int jump_offset);
int Emit(int instruction_index, InstructionCode code, size_t outputs_size = 0,
- InstructionOperand* *outputs = nullptr, size_t inputs_size = 0,
- InstructionOperand* *inputs = nullptr, size_t temps_size = 0,
- InstructionOperand* *temps = nullptr, bool is_call = false);
+ InstructionOperand* outputs = nullptr, size_t inputs_size = 0,
+ InstructionOperand* inputs = nullptr, size_t temps_size = 0,
+ InstructionOperand* temps = nullptr, bool is_call = false);
int AddInstruction(int instruction_index, Instruction* instruction);
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index eeb401373f..b5c688e147 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -4,7 +4,7 @@
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/typer.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -25,7 +25,7 @@ class JSBuiltinReducerTest : public TypedGraphTest {
Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::Flag::kNoFlags) {
MachineOperatorBuilder machine(zone(), kMachPtr, flags);
- JSGraph jsgraph(graph(), common(), javascript(), &machine);
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &machine);
JSBuiltinReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
@@ -53,12 +53,10 @@ namespace {
// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
Type* const kNumberTypes[] = {
- Type::UnsignedSmall(), Type::NegativeSigned32(),
- Type::NonNegativeSigned32(), Type::SignedSmall(),
- Type::Signed32(), Type::Unsigned32(),
- Type::Integral32(), Type::MinusZero(),
- Type::NaN(), Type::OrderedNumber(),
- Type::PlainNumber(), Type::Number()};
+ Type::UnsignedSmall(), Type::Negative32(), Type::Unsigned31(),
+ Type::SignedSmall(), Type::Signed32(), Type::Unsigned32(),
+ Type::Integral32(), Type::MinusZero(), Type::NaN(),
+ Type::OrderedNumber(), Type::PlainNumber(), Type::Number()};
} // namespace
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
new file mode 100644
index 0000000000..20d5c069fe
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -0,0 +1,215 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-intrinsic-lowering.h"
+#include "src/compiler/js-operator.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::AllOf;
+using testing::BitEq;
+using testing::Capture;
+using testing::CaptureEq;
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSIntrinsicLoweringTest : public GraphTest {
+ public:
+ JSIntrinsicLoweringTest() : GraphTest(3), javascript_(zone()) {}
+ ~JSIntrinsicLoweringTest() OVERRIDE {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &machine);
+ JSIntrinsicLowering reducer(&jsgraph);
+ return reducer.Reduce(node);
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+// -----------------------------------------------------------------------------
+// %_IsSmi
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsSmi) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsSmi, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsObjectIsSmi(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsNonNegativeSmi
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsNonNegativeSmi) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsNonNegativeSmi, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsObjectIsNonNegativeSmi(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsArray
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsArray, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
+ IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false)),
+ effect, _),
+ IsInt32Constant(JS_ARRAY_TYPE)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsFunction
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsFunction) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsFunction, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
+ IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false)),
+ effect, _),
+ IsInt32Constant(JS_FUNCTION_TYPE)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsRegExp
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsRegExp, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
+ IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false)),
+ effect, _),
+ IsInt32Constant(JS_REGEXP_TYPE)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_ValueOf
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineValueOf) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineValueOf, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch0, if_false0, branch1, if_true1;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ kMachAnyTagged, input,
+ IsPhi(kMachAnyTagged, IsLoadField(AccessBuilder::ForValue(), input,
+ effect, CaptureEq(&if_true1)),
+ input,
+ IsMerge(
+ AllOf(CaptureEq(&if_true1), IsIfTrue(CaptureEq(&branch1))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch1),
+ IsBranch(
+ IsWord32Equal(
+ IsLoadField(
+ AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false0)),
+ effect, _),
+ IsInt32Constant(JS_VALUE_TYPE)),
+ CaptureEq(&if_false0)))))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch0),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false0), IsIfFalse(CaptureEq(&branch0))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 7aa0c6447a..8f4622ae67 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -66,9 +66,9 @@ const SharedOperator kSharedOperators[] = {
SHARED(Modulus, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
SHARED(UnaryNot, Operator::kPure, 1, 0, 0, 0, 1, 0),
SHARED(ToBoolean, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(ToNumber, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(ToName, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+ SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
SHARED(Create, Operator::kEliminatable, 0, 0, 1, 1, 1, 1),
@@ -77,7 +77,7 @@ const SharedOperator kSharedOperators[] = {
SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
SHARED(Debugger, Operator::kNoProperties, 0, 0, 1, 1, 0, 1),
SHARED(CreateFunctionContext, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
+ SHARED(CreateWithContext, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
SHARED(CreateBlockContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
SHARED(CreateScriptContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1)
@@ -152,11 +152,11 @@ INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSSharedOperatorTest,
class JSStorePropertyOperatorTest
: public TestWithZone,
- public ::testing::WithParamInterface<StrictMode> {};
+ public ::testing::WithParamInterface<LanguageMode> {};
TEST_P(JSStorePropertyOperatorTest, InstancesAreGloballyShared) {
- const StrictMode mode = GetParam();
+ const LanguageMode mode = GetParam();
JSOperatorBuilder javascript1(zone());
JSOperatorBuilder javascript2(zone());
EXPECT_EQ(javascript1.StoreProperty(mode), javascript2.StoreProperty(mode));
@@ -165,7 +165,7 @@ TEST_P(JSStorePropertyOperatorTest, InstancesAreGloballyShared) {
TEST_P(JSStorePropertyOperatorTest, NumberOfInputsAndOutputs) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
+ const LanguageMode mode = GetParam();
const Operator* op = javascript.StoreProperty(mode);
// TODO(jarin): Get rid of this hack.
@@ -187,7 +187,7 @@ TEST_P(JSStorePropertyOperatorTest, NumberOfInputsAndOutputs) {
TEST_P(JSStorePropertyOperatorTest, OpcodeIsCorrect) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
+ const LanguageMode mode = GetParam();
const Operator* op = javascript.StoreProperty(mode);
EXPECT_EQ(IrOpcode::kJSStoreProperty, op->opcode());
}
@@ -195,15 +195,15 @@ TEST_P(JSStorePropertyOperatorTest, OpcodeIsCorrect) {
TEST_P(JSStorePropertyOperatorTest, OpParameter) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
+ const LanguageMode mode = GetParam();
const Operator* op = javascript.StoreProperty(mode);
- EXPECT_EQ(mode, OpParameter<StrictMode>(op));
+ EXPECT_EQ(mode, OpParameter<LanguageMode>(op));
}
TEST_P(JSStorePropertyOperatorTest, Properties) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
+ const LanguageMode mode = GetParam();
const Operator* op = javascript.StoreProperty(mode);
EXPECT_EQ(Operator::kNoProperties, op->properties());
}
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 97ff106329..d61a1817b2 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -7,7 +7,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -62,7 +62,8 @@ Type* const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
Type::Number(), Type::String(), Type::Object()};
-const StrictMode kStrictModes[] = {SLOPPY, STRICT};
+STATIC_ASSERT(LANGUAGE_END == 3);
+const LanguageMode kLanguageModes[] = {SLOPPY, STRICT, STRONG};
} // namespace
@@ -75,11 +76,17 @@ class JSTypedLoweringTest : public TypedGraphTest {
protected:
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone());
- JSGraph jsgraph(graph(), common(), javascript(), &machine);
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &machine);
JSTypedLowering reducer(&jsgraph, zone());
return reducer.Reduce(node);
}
+ Node* EmptyFrameState() {
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &machine);
+ return jsgraph.EmptyFrameState();
+ }
+
Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
Runtime::SetupArrayBuffer(isolate(), buffer, true, bytes, byte_length);
@@ -113,7 +120,6 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithBoolean) {
TEST_F(JSTypedLoweringTest, JSUnaryNotWithFalsish) {
- Handle<Object> zero = factory()->NewNumber(0);
Node* input = Parameter(
Type::Union(
Type::MinusZero(),
@@ -127,7 +133,7 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithFalsish) {
Type::Undetectable(),
Type::Union(
Type::Constant(factory()->false_value(), zone()),
- Type::Range(zero, zero, zone()), zone()),
+ Type::Range(0.0, 0.0, zone()), zone()),
zone()),
zone()),
zone()),
@@ -158,9 +164,7 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithTruish) {
TEST_F(JSTypedLoweringTest, JSUnaryNotWithNonZeroPlainNumber) {
- Node* input = Parameter(
- Type::Range(factory()->NewNumber(1), factory()->NewNumber(42), zone()),
- 0);
+ Node* input = Parameter(Type::Range(1.0, 42.0, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
@@ -222,7 +226,7 @@ TEST_F(JSTypedLoweringTest, ParameterWithNull) {
TEST_F(JSTypedLoweringTest, ParameterWithNaN) {
- const double kNaNs[] = {base::OS::nan_value(),
+ const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::signaling_NaN()};
TRACED_FOREACH(double, nan, kNaNs) {
@@ -253,8 +257,7 @@ TEST_F(JSTypedLoweringTest, ParameterWithPlainNumber) {
EXPECT_THAT(r.replacement(), IsNumberConstant(value));
}
TRACED_FOREACH(double, value, kIntegerValues) {
- Handle<Object> constant = factory()->NewNumber(value);
- Reduction r = Reduce(Parameter(Type::Range(constant, constant, zone())));
+ Reduction r = Reduce(Parameter(Type::Range(value, value, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(value));
}
@@ -293,7 +296,6 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
- Handle<Object> zero = factory()->NewNumber(0);
Node* input = Parameter(
Type::Union(
Type::MinusZero(),
@@ -307,7 +309,7 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
Type::Undetectable(),
Type::Union(
Type::Constant(factory()->false_value(), zone()),
- Type::Range(zero, zero, zone()), zone()),
+ Type::Range(0.0, 0.0, zone()), zone()),
zone()),
zone()),
zone()),
@@ -338,10 +340,7 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
- Node* input =
- Parameter(Type::Range(factory()->NewNumber(1),
- factory()->NewNumber(V8_INFINITY), zone()),
- 0);
+ Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
@@ -369,8 +368,12 @@ TEST_F(JSTypedLoweringTest, JSToNumberWithPlainPrimitive) {
Node* const context = Parameter(Type::Any(), 1);
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ToNumber(), input,
- context, effect, control));
+ Reduction r =
+ FLAG_turbo_deoptimization
+ ? Reduce(graph()->NewNode(javascript()->ToNumber(), input, context,
+ EmptyFrameState(), effect, control))
+ : Reduce(graph()->NewNode(javascript()->ToNumber(), input, context,
+ effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsToNumber(input, IsNumberConstant(BitEq(0.0)),
graph()->start(), control));
@@ -583,8 +586,7 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(
- Type::Range(factory()->NewNumber(kMinInt / element_size),
- factory()->NewNumber(kMaxInt / element_size), zone()));
+ Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
Node* base = HeapConstant(array);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
@@ -630,8 +632,7 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
int min = random_number_generator()->NextInt(static_cast<int>(kLength));
int max = random_number_generator()->NextInt(static_cast<int>(kLength));
if (min > max) std::swap(min, max);
- Node* key = Parameter(Type::Range(factory()->NewNumber(min),
- factory()->NewNumber(max), zone()));
+ Node* key = Parameter(Type::Range(min, max, zone()));
Node* base = HeapConstant(array);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
@@ -665,21 +666,20 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(
- Type::Range(factory()->NewNumber(kMinInt / element_size),
- factory()->NewNumber(kMaxInt / element_size), zone()));
+ Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
Node* base = HeapConstant(array);
Node* value =
Parameter(AccessBuilder::ForTypedArrayElement(type, true).type);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+ Node* node = graph()->NewNode(javascript()->StoreProperty(language_mode),
base, key, value, context);
if (FLAG_turbo_deoptimization) {
node->AppendInput(zone(), UndefinedConstant());
@@ -712,20 +712,19 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(
- Type::Range(factory()->NewNumber(kMinInt / element_size),
- factory()->NewNumber(kMaxInt / element_size), zone()));
+ Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(Type::Any());
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+ Node* node = graph()->NewNode(javascript()->StoreProperty(language_mode),
base, key, value, context);
if (FLAG_turbo_deoptimization) {
node->AppendInput(zone(), UndefinedConstant());
@@ -769,7 +768,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
@@ -777,14 +776,13 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
int min = random_number_generator()->NextInt(static_cast<int>(kLength));
int max = random_number_generator()->NextInt(static_cast<int>(kLength));
if (min > max) std::swap(min, max);
- Node* key = Parameter(Type::Range(factory()->NewNumber(min),
- factory()->NewNumber(max), zone()));
+ Node* key = Parameter(Type::Range(min, max, zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(access.type);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+ Node* node = graph()->NewNode(javascript()->StoreProperty(language_mode),
base, key, value, context);
if (FLAG_turbo_deoptimization) {
node->AppendInput(zone(), UndefinedConstant());
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
new file mode 100644
index 0000000000..dd0c46c42a
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -0,0 +1,451 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/loop-peeling.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::BitEq;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct While {
+ Node* loop;
+ Node* branch;
+ Node* if_true;
+ Node* exit;
+};
+
+
+// A helper for building branches.
+struct Branch {
+ Node* branch;
+ Node* if_true;
+ Node* if_false;
+};
+
+
+// A helper for building counters attached to loops.
+struct Counter {
+ Node* base;
+ Node* inc;
+ Node* phi;
+ Node* add;
+};
+
+
+class LoopPeelingTest : public GraphTest {
+ public:
+ LoopPeelingTest() : GraphTest(1), machine_(zone()) {}
+ ~LoopPeelingTest() OVERRIDE {}
+
+ protected:
+ MachineOperatorBuilder machine_;
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ LoopTree* GetLoopTree() {
+ if (FLAG_trace_turbo_graph) {
+ OFStream os(stdout);
+ os << AsRPO(*graph());
+ }
+ Zone zone;
+ return LoopFinder::BuildLoopTree(graph(), &zone);
+ }
+
+
+ PeeledIteration* PeelOne() {
+ LoopTree* loop_tree = GetLoopTree();
+ return Peel(loop_tree, loop_tree->outer_loops()[0]);
+ }
+
+ PeeledIteration* Peel(LoopTree* loop_tree, LoopTree::Loop* loop) {
+ PeeledIteration* peeled =
+ LoopPeeler::Peel(graph(), common(), loop_tree, loop, zone());
+ if (FLAG_trace_turbo_graph) {
+ OFStream os(stdout);
+ os << AsRPO(*graph());
+ }
+ return peeled;
+ }
+
+ Node* InsertReturn(Node* val, Node* effect, Node* control) {
+ Node* r = graph()->NewNode(common()->Return(), val, effect, control);
+ graph()->SetEnd(r);
+ return r;
+ }
+
+ Node* ExpectPeeled(Node* node, PeeledIteration* iter) {
+ Node* p = iter->map(node);
+ EXPECT_NE(node, p);
+ return p;
+ }
+
+ void ExpectNotPeeled(Node* node, PeeledIteration* iter) {
+ EXPECT_EQ(node, iter->map(node));
+ }
+
+ While NewWhile(Node* cond, Node* control = nullptr) {
+ if (control == nullptr) control = start();
+ Node* loop = graph()->NewNode(common()->Loop(2), control, control);
+ Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* exit = graph()->NewNode(common()->IfFalse(), branch);
+ loop->ReplaceInput(1, if_true);
+ return {loop, branch, if_true, exit};
+ }
+
+ void Chain(While* a, Node* control) { a->loop->ReplaceInput(0, control); }
+ void Nest(While* a, While* b) {
+ b->loop->ReplaceInput(1, a->exit);
+ a->loop->ReplaceInput(0, b->if_true);
+ }
+ Node* NewPhi(While* w, Node* a, Node* b) {
+ return graph()->NewNode(common()->Phi(kMachAnyTagged, 2), a, b, w->loop);
+ }
+
+ Branch NewBranch(Node* cond, Node* control = nullptr) {
+ if (control == nullptr) control = start();
+ Node* branch = graph()->NewNode(common()->Branch(), cond, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ return {branch, if_true, if_false};
+ }
+
+ Counter NewCounter(While* w, int32_t b, int32_t k) {
+ Node* base = Int32Constant(b);
+ Node* inc = Int32Constant(k);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), base, base, w->loop);
+ Node* add = graph()->NewNode(machine()->Int32Add(), phi, inc);
+ phi->ReplaceInput(1, add);
+ return {base, inc, phi, add};
+ }
+};
+
+
+TEST_F(LoopPeelingTest, SimpleLoop) {
+ Node* p0 = Parameter(0);
+ While w = NewWhile(p0);
+ Node* r = InsertReturn(p0, start(), w.exit);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* br1 = ExpectPeeled(w.branch, peeled);
+ Node* if_true1 = ExpectPeeled(w.if_true, peeled);
+ Node* if_false1 = ExpectPeeled(w.exit, peeled);
+
+ EXPECT_THAT(br1, IsBranch(p0, start()));
+ EXPECT_THAT(if_true1, IsIfTrue(br1));
+ EXPECT_THAT(if_false1, IsIfFalse(br1));
+
+ EXPECT_THAT(w.loop, IsLoop(if_true1, w.if_true));
+ EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(w.exit, if_false1)));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleLoopWithCounter) {
+ Node* p0 = Parameter(0);
+ While w = NewWhile(p0);
+ Counter c = NewCounter(&w, 0, 1);
+ Node* r = InsertReturn(c.phi, start(), w.exit);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* br1 = ExpectPeeled(w.branch, peeled);
+ Node* if_true1 = ExpectPeeled(w.if_true, peeled);
+ Node* if_false1 = ExpectPeeled(w.exit, peeled);
+
+ EXPECT_THAT(br1, IsBranch(p0, start()));
+ EXPECT_THAT(if_true1, IsIfTrue(br1));
+ EXPECT_THAT(if_false1, IsIfFalse(br1));
+ EXPECT_THAT(w.loop, IsLoop(if_true1, w.if_true));
+
+ EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r, IsReturn(IsPhi(kMachAnyTagged, c.phi, c.base,
+ AllOf(CaptureEq(&merge), IsMerge(w.exit, if_false1))),
+ start(), CaptureEq(&merge)));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_outer) {
+ Node* p0 = Parameter(0);
+ While outer = NewWhile(p0);
+ While inner = NewWhile(p0);
+ Nest(&inner, &outer);
+
+ Counter c = NewCounter(&outer, 0, 1);
+ Node* r = InsertReturn(c.phi, start(), outer.exit);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* bro = ExpectPeeled(outer.branch, peeled);
+ Node* if_trueo = ExpectPeeled(outer.if_true, peeled);
+ Node* if_falseo = ExpectPeeled(outer.exit, peeled);
+
+ EXPECT_THAT(bro, IsBranch(p0, start()));
+ EXPECT_THAT(if_trueo, IsIfTrue(bro));
+ EXPECT_THAT(if_falseo, IsIfFalse(bro));
+
+ Node* bri = ExpectPeeled(inner.branch, peeled);
+ Node* if_truei = ExpectPeeled(inner.if_true, peeled);
+ Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+
+ EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
+ EXPECT_THAT(if_truei, IsIfTrue(bri));
+ EXPECT_THAT(if_falsei, IsIfFalse(bri));
+
+ EXPECT_THAT(outer.loop, IsLoop(if_falsei, inner.exit));
+ EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r,
+ IsReturn(IsPhi(kMachAnyTagged, c.phi, c.base,
+ AllOf(CaptureEq(&merge), IsMerge(outer.exit, if_falseo))),
+ start(), CaptureEq(&merge)));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_inner) {
+ Node* p0 = Parameter(0);
+ While outer = NewWhile(p0);
+ While inner = NewWhile(p0);
+ Nest(&inner, &outer);
+
+ Counter c = NewCounter(&outer, 0, 1);
+ Node* r = InsertReturn(c.phi, start(), outer.exit);
+
+ LoopTree* loop_tree = GetLoopTree();
+ LoopTree::Loop* loop = loop_tree->ContainingLoop(inner.loop);
+ EXPECT_NE(nullptr, loop);
+ EXPECT_EQ(1u, loop->depth());
+
+ PeeledIteration* peeled = Peel(loop_tree, loop);
+
+ ExpectNotPeeled(outer.loop, peeled);
+ ExpectNotPeeled(outer.branch, peeled);
+ ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.exit, peeled);
+
+ Node* bri = ExpectPeeled(inner.branch, peeled);
+ Node* if_truei = ExpectPeeled(inner.if_true, peeled);
+ Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+
+ EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
+ EXPECT_THAT(if_truei, IsIfTrue(bri));
+ EXPECT_THAT(if_falsei, IsIfFalse(bri));
+
+ EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ ExpectNotPeeled(c.add, peeled);
+
+ EXPECT_THAT(r, IsReturn(c.phi, start(), outer.exit));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
+ Node* p0 = Parameter(0);
+ While outer = NewWhile(p0);
+ While inner = NewWhile(p0);
+ Nest(&inner, &outer);
+ Counter c = NewCounter(&inner, 0, 1);
+ Node* phi = NewPhi(&outer, Int32Constant(11), c.phi);
+
+ Node* r = InsertReturn(phi, start(), outer.exit);
+
+ LoopTree* loop_tree = GetLoopTree();
+ LoopTree::Loop* loop = loop_tree->ContainingLoop(inner.loop);
+ EXPECT_NE(nullptr, loop);
+ EXPECT_EQ(1u, loop->depth());
+
+ PeeledIteration* peeled = Peel(loop_tree, loop);
+
+ ExpectNotPeeled(outer.loop, peeled);
+ ExpectNotPeeled(outer.branch, peeled);
+ ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.exit, peeled);
+
+ Node* bri = ExpectPeeled(inner.branch, peeled);
+ Node* if_truei = ExpectPeeled(inner.if_true, peeled);
+ Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+
+ EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
+ EXPECT_THAT(if_truei, IsIfTrue(bri));
+ EXPECT_THAT(if_falsei, IsIfFalse(bri));
+
+ EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
+
+ Node* back = phi->InputAt(1);
+ EXPECT_THAT(back, IsPhi(kMachAnyTagged, c.phi, c.base,
+ IsMerge(inner.exit, if_falsei)));
+
+ EXPECT_THAT(phi,
+ IsPhi(kMachAnyTagged, IsInt32Constant(11), back, outer.loop));
+
+ EXPECT_THAT(r, IsReturn(phi, start(), outer.exit));
+}
+
+
+TEST_F(LoopPeelingTest, TwoBackedgeLoop) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+ Branch b2 = NewBranch(p0, b1.if_true);
+
+ loop->ReplaceInput(1, b2.if_true);
+ loop->ReplaceInput(2, b2.if_false);
+
+ Node* r = InsertReturn(p0, start(), b1.if_false);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1b = ExpectPeeled(b1.branch, peeled);
+ Node* b1t = ExpectPeeled(b1.if_true, peeled);
+ Node* b1f = ExpectPeeled(b1.if_false, peeled);
+
+ EXPECT_THAT(b1b, IsBranch(p0, start()));
+ EXPECT_THAT(ExpectPeeled(b1.if_true, peeled), IsIfTrue(b1b));
+ EXPECT_THAT(b1f, IsIfFalse(b1b));
+
+ Node* b2b = ExpectPeeled(b2.branch, peeled);
+ Node* b2t = ExpectPeeled(b2.if_true, peeled);
+ Node* b2f = ExpectPeeled(b2.if_false, peeled);
+
+ EXPECT_THAT(b2b, IsBranch(p0, b1t));
+ EXPECT_THAT(b2t, IsIfTrue(b2b));
+ EXPECT_THAT(b2f, IsIfFalse(b2b));
+
+ EXPECT_THAT(loop, IsLoop(IsMerge(b2t, b2f), b2.if_true, b2.if_false));
+ EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(b1.if_false, b1f)));
+}
+
+
+TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+ Branch b2 = NewBranch(p0, b1.if_true);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 3), Int32Constant(0),
+ Int32Constant(1), Int32Constant(2), loop);
+
+ loop->ReplaceInput(1, b2.if_true);
+ loop->ReplaceInput(2, b2.if_false);
+
+ Node* r = InsertReturn(phi, start(), b1.if_false);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1b = ExpectPeeled(b1.branch, peeled);
+ Node* b1t = ExpectPeeled(b1.if_true, peeled);
+ Node* b1f = ExpectPeeled(b1.if_false, peeled);
+
+ EXPECT_THAT(b1b, IsBranch(p0, start()));
+ EXPECT_THAT(ExpectPeeled(b1.if_true, peeled), IsIfTrue(b1b));
+ EXPECT_THAT(b1f, IsIfFalse(b1b));
+
+ Node* b2b = ExpectPeeled(b2.branch, peeled);
+ Node* b2t = ExpectPeeled(b2.if_true, peeled);
+ Node* b2f = ExpectPeeled(b2.if_false, peeled);
+
+ EXPECT_THAT(b2b, IsBranch(p0, b1t));
+ EXPECT_THAT(b2t, IsIfTrue(b2b));
+ EXPECT_THAT(b2f, IsIfFalse(b2b));
+
+ EXPECT_THAT(loop, IsLoop(IsMerge(b2t, b2f), b2.if_true, b2.if_false));
+
+ EXPECT_THAT(
+ phi, IsPhi(kMachAnyTagged, IsPhi(kMachAnyTagged, IsInt32Constant(1),
+ IsInt32Constant(2), IsMerge(b2t, b2f)),
+ IsInt32Constant(1), IsInt32Constant(2), loop));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r, IsReturn(IsPhi(kMachAnyTagged, phi, IsInt32Constant(0),
+ AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
+ start(), CaptureEq(&merge)));
+}
+
+
+TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+ Branch b2 = NewBranch(p0, b1.if_true);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 3), Int32Constant(0),
+ Int32Constant(1), Int32Constant(2), loop);
+
+ phi->ReplaceInput(
+ 1, graph()->NewNode(machine()->Int32Add(), phi, Int32Constant(1)));
+ phi->ReplaceInput(
+ 2, graph()->NewNode(machine()->Int32Add(), phi, Int32Constant(2)));
+
+ loop->ReplaceInput(1, b2.if_true);
+ loop->ReplaceInput(2, b2.if_false);
+
+ Node* r = InsertReturn(phi, start(), b1.if_false);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1b = ExpectPeeled(b1.branch, peeled);
+ Node* b1t = ExpectPeeled(b1.if_true, peeled);
+ Node* b1f = ExpectPeeled(b1.if_false, peeled);
+
+ EXPECT_THAT(b1b, IsBranch(p0, start()));
+ EXPECT_THAT(ExpectPeeled(b1.if_true, peeled), IsIfTrue(b1b));
+ EXPECT_THAT(b1f, IsIfFalse(b1b));
+
+ Node* b2b = ExpectPeeled(b2.branch, peeled);
+ Node* b2t = ExpectPeeled(b2.if_true, peeled);
+ Node* b2f = ExpectPeeled(b2.if_false, peeled);
+
+ EXPECT_THAT(b2b, IsBranch(p0, b1t));
+ EXPECT_THAT(b2t, IsIfTrue(b2b));
+ EXPECT_THAT(b2f, IsIfFalse(b2b));
+
+ Capture<Node*> entry;
+ EXPECT_THAT(loop, IsLoop(AllOf(CaptureEq(&entry), IsMerge(b2t, b2f)),
+ b2.if_true, b2.if_false));
+
+ Node* eval = phi->InputAt(0);
+
+ EXPECT_THAT(eval, IsPhi(kMachAnyTagged,
+ IsInt32Add(IsInt32Constant(0), IsInt32Constant(1)),
+ IsInt32Add(IsInt32Constant(0), IsInt32Constant(2)),
+ CaptureEq(&entry)));
+
+ EXPECT_THAT(phi,
+ IsPhi(kMachAnyTagged, eval, IsInt32Add(phi, IsInt32Constant(1)),
+ IsInt32Add(phi, IsInt32Constant(2)), loop));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r, IsReturn(IsPhi(kMachAnyTagged, phi, IsInt32Constant(0),
+ AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
+ start(), CaptureEq(&merge)));
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 6fdba35f58..f63e70da5a 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -28,7 +28,7 @@ class MachineOperatorReducerTest : public TypedGraphTest {
protected:
Reduction Reduce(Node* node) {
JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(graph(), common(), &javascript, &machine_);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine_);
MachineOperatorReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
@@ -501,6 +501,30 @@ TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
// Word32And
+TEST_F(MachineOperatorReducerTest, Word32AndWithWord32ShlWithConstant) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FORRANGE(int32_t, l, 1, 31) {
+ TRACED_FORRANGE(int32_t, k, 1, l) {
+ // (x << L) & (-1 << K) => x << L
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(l)),
+ Int32Constant(-1 << k)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsWord32Shl(p0, IsInt32Constant(l)));
+
+ // (-1 << K) & (x << L) => x << L
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Word32And(), Int32Constant(-1 << k),
+ graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(l))));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsWord32Shl(p0, IsInt32Constant(l)));
+ }
+ }
+}
+
+
TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
Node* const p0 = Parameter(0);
@@ -842,6 +866,25 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
// -----------------------------------------------------------------------------
+// Int32Sub
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithConstant) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, k, kInt32Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Sub(), p0, Int32Constant(k)));
+ ASSERT_TRUE(r.Changed());
+ if (k == 0) {
+ EXPECT_EQ(p0, r.replacement());
+ } else {
+ EXPECT_THAT(r.replacement(), IsInt32Add(p0, IsInt32Constant(-k)));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
// Int32Div
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 0b3a0f5a41..efe26d22b4 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -42,19 +42,19 @@ struct FPCmp {
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
kMachFloat64},
- kUnorderedEqual},
+ kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
kMachFloat64},
- kUnorderedLessThan},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
kMipsCmpD, kMachFloat64},
- kUnorderedLessThanOrEqual},
+ kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
kMachFloat64},
- kUnorderedLessThan},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
"Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
- kUnorderedLessThanOrEqual}};
+ kUnsignedLessThanOrEqual}};
struct Conversion {
// The machine_type field in MachInst1 represents the destination type.
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index a39ae754f0..41453337f2 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -42,19 +42,19 @@ struct FPCmp {
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMips64CmpD,
kMachFloat64},
- kUnorderedEqual},
+ kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMips64CmpD,
kMachFloat64},
- kUnorderedLessThan},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
kMips64CmpD, kMachFloat64},
- kUnorderedLessThanOrEqual},
+ kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
kMips64CmpD, kMachFloat64},
- kUnorderedLessThan},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
"Float64GreaterThanOrEqual", kMips64CmpD, kMachFloat64},
- kUnorderedLessThanOrEqual}};
+ kUnsignedLessThanOrEqual}};
struct Conversion {
// The machine_type field in MachInst1 represents the destination type.
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
index 5b956f077a..b8375fab10 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
@@ -12,11 +12,7 @@ namespace compiler {
class MoveOptimizerTest : public InstructionSequenceTest {
public:
GapInstruction* LastGap() {
- auto instruction = sequence()->instructions().back();
- if (!instruction->IsGapMoves()) {
- instruction = *(sequence()->instructions().rbegin() + 1);
- }
- return GapInstruction::cast(instruction);
+ return GapInstruction::cast(*(sequence()->instructions().rbegin() + 1));
}
void AddMove(GapInstruction* gap, TestOperand from, TestOperand to,
@@ -73,12 +69,12 @@ class MoveOptimizerTest : public InstructionSequenceTest {
CHECK_NE(kNoValue, op.value_);
switch (op.type_) {
case kConstant:
- return ConstantOperand::Create(op.value_, zone());
+ return ConstantOperand::New(op.value_, zone());
case kFixedSlot:
- return StackSlotOperand::Create(op.value_, zone());
+ return StackSlotOperand::New(op.value_, zone());
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
- return RegisterOperand::Create(op.value_, zone());
+ return RegisterOperand::New(op.value_, zone());
default:
break;
}
@@ -90,10 +86,10 @@ class MoveOptimizerTest : public InstructionSequenceTest {
TEST_F(MoveOptimizerTest, RemovesRedundant) {
StartBlock();
+ EmitNop();
AddMove(LastGap(), Reg(0), Reg(1));
EmitNop();
AddMove(LastGap(), Reg(1), Reg(0));
- EmitNop();
EndBlock(Last());
Optimize();
diff --git a/deps/v8/test/unittests/compiler/node-properties-unittest.cc b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
new file mode 100644
index 0000000000..bb471bd01e
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
@@ -0,0 +1,59 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-properties.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::AnyOf;
+using testing::IsNull;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef TestWithZone NodePropertiesTest;
+
+
+TEST_F(NodePropertiesTest, FindProjection) {
+ CommonOperatorBuilder common(zone());
+ Node* start = Node::New(zone(), 0, common.Start(1), 0, nullptr, false);
+ Node* proj0 = Node::New(zone(), 1, common.Projection(0), 1, &start, false);
+ Node* proj1 = Node::New(zone(), 2, common.Projection(1), 1, &start, false);
+ EXPECT_EQ(proj0, NodeProperties::FindProjection(start, 0));
+ EXPECT_EQ(proj1, NodeProperties::FindProjection(start, 1));
+ EXPECT_THAT(NodeProperties::FindProjection(start, 2), IsNull());
+ EXPECT_THAT(NodeProperties::FindProjection(start, 1234567890), IsNull());
+}
+
+
+TEST_F(NodePropertiesTest, CollectControlProjections_Branch) {
+ Node* result[2];
+ CommonOperatorBuilder common(zone());
+ Node* branch = Node::New(zone(), 1, common.Branch(), 0, nullptr, false);
+ Node* if_false = Node::New(zone(), 2, common.IfFalse(), 1, &branch, false);
+ Node* if_true = Node::New(zone(), 3, common.IfTrue(), 1, &branch, false);
+ NodeProperties::CollectControlProjections(branch, result, arraysize(result));
+ EXPECT_EQ(if_true, result[0]);
+ EXPECT_EQ(if_false, result[1]);
+}
+
+
+TEST_F(NodePropertiesTest, CollectControlProjections_Switch) {
+ Node* result[3];
+ CommonOperatorBuilder common(zone());
+ Node* sw = Node::New(zone(), 1, common.Switch(3), 0, nullptr, false);
+ Node* if_default = Node::New(zone(), 2, common.IfDefault(), 1, &sw, false);
+ Node* if_value1 = Node::New(zone(), 3, common.IfValue(1), 1, &sw, false);
+ Node* if_value2 = Node::New(zone(), 4, common.IfValue(2), 1, &sw, false);
+ NodeProperties::CollectControlProjections(sw, result, arraysize(result));
+ EXPECT_THAT(result[0], AnyOf(if_value1, if_value2));
+ EXPECT_THAT(result[1], AnyOf(if_value1, if_value2));
+ EXPECT_EQ(if_default, result[2]);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 74afda974a..eccc96227e 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -5,8 +5,9 @@
#include "test/unittests/compiler/node-test-utils.h"
#include "src/assembler.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/unique.h"
using testing::_;
using testing::MakeMatcher;
@@ -94,11 +95,98 @@ class IsBranchMatcher FINAL : public NodeMatcher {
};
-class IsMergeMatcher FINAL : public NodeMatcher {
+class IsSwitchMatcher FINAL : public NodeMatcher {
public:
- IsMergeMatcher(const Matcher<Node*>& control0_matcher,
- const Matcher<Node*>& control1_matcher)
- : NodeMatcher(IrOpcode::kMerge),
+ IsSwitchMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kSwitch),
+ value_matcher_(value_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const FINAL {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsIfValueMatcher FINAL : public NodeMatcher {
+ public:
+ IsIfValueMatcher(const Matcher<int32_t>& value_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kIfValue),
+ value_matcher_(value_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const FINAL {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<int32_t>(node->op()), "value",
+ value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<int32_t> value_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsControl1Matcher FINAL : public NodeMatcher {
+ public:
+ IsControl1Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const FINAL {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsControl2Matcher FINAL : public NodeMatcher {
+ public:
+ IsControl2Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher)
+ : NodeMatcher(opcode),
control0_matcher_(control0_matcher),
control1_matcher_(control1_matcher) {}
@@ -125,27 +213,42 @@ class IsMergeMatcher FINAL : public NodeMatcher {
};
-class IsControl1Matcher FINAL : public NodeMatcher {
+class IsControl3Matcher FINAL : public NodeMatcher {
public:
- IsControl1Matcher(IrOpcode::Value opcode,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+ IsControl3Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher)
+ : NodeMatcher(opcode),
+ control0_matcher_(control0_matcher),
+ control1_matcher_(control1_matcher),
+ control2_matcher_(control2_matcher) {}
void DescribeTo(std::ostream* os) const FINAL {
NodeMatcher::DescribeTo(os);
- *os << " whose control (";
- control_matcher_.DescribeTo(os);
+ *os << " whose control0 (";
+ control0_matcher_.DescribeTo(os);
+ *os << ") and control1 (";
+ control1_matcher_.DescribeTo(os);
+ *os << ") and control2 (";
+ control2_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
+ "control0", control0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
+ "control1", control1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 2),
+ "control2", control2_matcher_, listener));
}
private:
- const Matcher<Node*> control_matcher_;
+ const Matcher<Node*> control0_matcher_;
+ const Matcher<Node*> control1_matcher_;
+ const Matcher<Node*> control2_matcher_;
};
@@ -180,6 +283,44 @@ class IsFinishMatcher FINAL : public NodeMatcher {
};
+class IsReturnMatcher FINAL : public NodeMatcher {
+ public:
+ IsReturnMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kReturn),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const FINAL {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
template <typename T>
class IsConstantMatcher FINAL : public NodeMatcher {
public:
@@ -294,6 +435,58 @@ class IsPhiMatcher FINAL : public NodeMatcher {
};
+class IsPhi2Matcher FINAL : public NodeMatcher {
+ public:
+ IsPhi2Matcher(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kPhi),
+ type_matcher_(type_matcher),
+ value0_matcher_(value0_matcher),
+ value1_matcher_(value1_matcher),
+ value2_matcher_(value2_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const FINAL {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose type (";
+ type_matcher_.DescribeTo(os);
+ *os << "), value0 (";
+ value0_matcher_.DescribeTo(os);
+ *os << "), value1 (";
+ value1_matcher_.DescribeTo(os);
+ *os << "), value2 (";
+ value2_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
+ type_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value0", value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value1", value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "value2", value2_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<MachineType> type_matcher_;
+ const Matcher<Node*> value0_matcher_;
+ const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> value2_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
class IsEffectPhiMatcher FINAL : public NodeMatcher {
public:
IsEffectPhiMatcher(const Matcher<Node*>& effect0_matcher,
@@ -332,6 +525,37 @@ class IsEffectPhiMatcher FINAL : public NodeMatcher {
};
+class IsEffectSetMatcher FINAL : public NodeMatcher {
+ public:
+ IsEffectSetMatcher(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher)
+ : NodeMatcher(IrOpcode::kEffectSet),
+ effect0_matcher_(effect0_matcher),
+ effect1_matcher_(effect1_matcher) {}
+
+ void DescribeTo(std::ostream* os) const FINAL {
+ NodeMatcher::DescribeTo(os);
+ *os << "), effect0 (";
+ effect0_matcher_.DescribeTo(os);
+ *os << ") and effect1 (";
+ effect1_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 0),
+ "effect0", effect0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 1),
+ "effect1", effect1_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> effect0_matcher_;
+ const Matcher<Node*> effect1_matcher_;
+};
+
+
class IsProjectionMatcher FINAL : public NodeMatcher {
public:
IsProjectionMatcher(const Matcher<size_t>& index_matcher,
@@ -1018,6 +1242,17 @@ class IsUnopMatcher FINAL : public NodeMatcher {
private:
const Matcher<Node*> input_matcher_;
};
+
+} // namespace
+
+
+Matcher<Node*> IsAlways() {
+ return MakeMatcher(new NodeMatcher(IrOpcode::kAlways));
+}
+
+
+Matcher<Node*> IsEnd(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsControl1Matcher(IrOpcode::kEnd, control_matcher));
}
@@ -1029,7 +1264,31 @@ Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher) {
- return MakeMatcher(new IsMergeMatcher(control0_matcher, control1_matcher));
+ return MakeMatcher(new IsControl2Matcher(IrOpcode::kMerge, control0_matcher,
+ control1_matcher));
+}
+
+
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher) {
+ return MakeMatcher(new IsControl3Matcher(IrOpcode::kMerge, control0_matcher,
+ control1_matcher, control2_matcher));
+}
+
+
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher) {
+ return MakeMatcher(new IsControl2Matcher(IrOpcode::kLoop, control0_matcher,
+ control1_matcher));
+}
+
+
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher) {
+ return MakeMatcher(new IsControl3Matcher(IrOpcode::kLoop, control0_matcher,
+ control1_matcher, control2_matcher));
}
@@ -1044,6 +1303,24 @@ Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher) {
}
+Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsSwitchMatcher(value_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsIfValue(const Matcher<int32_t>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsIfValueMatcher(value_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsIfDefault(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsControl1Matcher(IrOpcode::kIfDefault, control_matcher));
+}
+
+
Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
}
@@ -1055,6 +1332,14 @@ Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
}
+Matcher<Node*> IsReturn(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsReturnMatcher(value_matcher, effect_matcher, control_matcher));
+}
+
+
Matcher<Node*> IsExternalConstant(
const Matcher<ExternalReference>& value_matcher) {
return MakeMatcher(new IsConstantMatcher<ExternalReference>(
@@ -1117,6 +1402,17 @@ Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
}
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& merge_matcher) {
+ return MakeMatcher(new IsPhi2Matcher(type_matcher, value0_matcher,
+ value1_matcher, value2_matcher,
+ merge_matcher));
+}
+
+
Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
const Matcher<Node*>& merge_matcher) {
@@ -1125,6 +1421,12 @@ Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
}
+Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher) {
+ return MakeMatcher(new IsEffectSetMatcher(effect0_matcher, effect1_matcher));
+}
+
+
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher) {
return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
@@ -1308,6 +1610,8 @@ IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
+IS_UNOP_MATCHER(ObjectIsSmi)
+IS_UNOP_MATCHER(ObjectIsNonNegativeSmi)
#undef IS_UNOP_MATCHER
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 02b6e43175..03011972b7 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -31,15 +31,33 @@ class Node;
using ::testing::Matcher;
+Matcher<Node*> IsAlways();
+Matcher<Node*> IsEnd(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher);
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher);
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfValue(const Matcher<int32_t>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfDefault(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsReturn(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsExternalConstant(
const Matcher<ExternalReference>& value_matcher);
Matcher<Node*> IsHeapConstant(
@@ -57,9 +75,16 @@ Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& merge_matcher);
Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher);
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher);
Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
@@ -118,6 +143,8 @@ Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsObjectIsSmi(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsObjectIsNonNegativeSmi(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
diff --git a/deps/v8/test/unittests/compiler/node-unittest.cc b/deps/v8/test/unittests/compiler/node-unittest.cc
new file mode 100644
index 0000000000..f56d7d6f8c
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/node-unittest.cc
@@ -0,0 +1,170 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::ElementsAre;
+using testing::UnorderedElementsAre;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef TestWithZone NodeTest;
+
+
+namespace {
+
+const IrOpcode::Value kOpcode0 = static_cast<IrOpcode::Value>(0);
+const IrOpcode::Value kOpcode1 = static_cast<IrOpcode::Value>(1);
+const IrOpcode::Value kOpcode2 = static_cast<IrOpcode::Value>(2);
+
+const Operator kOp0(kOpcode0, Operator::kNoProperties, "Op0", 0, 0, 0, 1, 0, 0);
+const Operator kOp1(kOpcode1, Operator::kNoProperties, "Op1", 1, 0, 0, 1, 0, 0);
+const Operator kOp2(kOpcode2, Operator::kNoProperties, "Op2", 2, 0, 0, 1, 0, 0);
+
+} // namespace
+
+
+TEST_F(NodeTest, New) {
+ Node* const node = Node::New(zone(), 1, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(1, node->id());
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_TRUE(node->uses().empty());
+ EXPECT_EQ(0, node->InputCount());
+ EXPECT_TRUE(node->inputs().empty());
+ EXPECT_EQ(&kOp0, node->op());
+ EXPECT_EQ(kOpcode0, node->opcode());
+}
+
+
+TEST_F(NodeTest, NewWithInputs) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(0, n0->UseCount());
+ EXPECT_EQ(0, n0->InputCount());
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_EQ(1, n0->UseCount());
+ EXPECT_EQ(n1, n0->UseAt(0));
+ EXPECT_EQ(0, n1->UseCount());
+ EXPECT_EQ(1, n1->InputCount());
+ EXPECT_EQ(n0, n1->InputAt(0));
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_EQ(2, n0->UseCount());
+ EXPECT_EQ(n1, n0->UseAt(0));
+ EXPECT_EQ(n2, n0->UseAt(1));
+ EXPECT_EQ(2, n2->InputCount());
+ EXPECT_EQ(n0, n2->InputAt(0));
+ EXPECT_EQ(n1, n2->InputAt(1));
+}
+
+
+TEST_F(NodeTest, InputIteratorEmpty) {
+ Node* node = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(node->inputs().begin(), node->inputs().end());
+}
+
+
+TEST_F(NodeTest, InputIteratorOne) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_THAT(n1->inputs(), ElementsAre(n0));
+}
+
+
+TEST_F(NodeTest, InputIteratorTwo) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_THAT(n2->inputs(), ElementsAre(n0, n1));
+}
+
+
+TEST_F(NodeTest, UseIteratorEmpty) {
+ Node* node = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(node->uses().begin(), node->uses().end());
+}
+
+
+TEST_F(NodeTest, UseIteratorOne) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_THAT(n0->uses(), ElementsAre(n1));
+}
+
+
+TEST_F(NodeTest, UseIteratorTwo) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_THAT(n0->uses(), UnorderedElementsAre(n1, n2));
+}
+
+
+TEST_F(NodeTest, OwnedBy) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_FALSE(n0->OwnedBy(n0));
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_FALSE(n0->OwnedBy(n0));
+ EXPECT_FALSE(n1->OwnedBy(n1));
+ EXPECT_TRUE(n0->OwnedBy(n1));
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_FALSE(n0->OwnedBy(n0));
+ EXPECT_FALSE(n1->OwnedBy(n1));
+ EXPECT_FALSE(n2->OwnedBy(n2));
+ EXPECT_FALSE(n0->OwnedBy(n1));
+ EXPECT_FALSE(n0->OwnedBy(n2));
+ EXPECT_TRUE(n1->OwnedBy(n2));
+ n2->ReplaceInput(0, n2);
+ EXPECT_TRUE(n0->OwnedBy(n1));
+ EXPECT_TRUE(n1->OwnedBy(n2));
+ n2->ReplaceInput(1, n0);
+ EXPECT_FALSE(n0->OwnedBy(n1));
+ EXPECT_FALSE(n1->OwnedBy(n2));
+}
+
+
+TEST_F(NodeTest, ReplaceUsesNone) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ Node* node = Node::New(zone(), 42, &kOp0, 0, nullptr, false);
+ EXPECT_TRUE(node->uses().empty());
+ node->ReplaceUses(n0);
+ EXPECT_TRUE(node->uses().empty());
+ node->ReplaceUses(n1);
+ EXPECT_TRUE(node->uses().empty());
+ node->ReplaceUses(n2);
+ EXPECT_TRUE(node->uses().empty());
+}
+
+
+TEST_F(NodeTest, AppendInput) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* node = Node::New(zone(), 12345, &kOp0, 0, nullptr, true);
+ EXPECT_TRUE(node->inputs().empty());
+ node->AppendInput(zone(), n0);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n0));
+ node->AppendInput(zone(), n1);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1));
+ node->AppendInput(zone(), n0);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1, n0));
+ node->AppendInput(zone(), n0);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1, n0, n0));
+ node->AppendInput(zone(), n1);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1, n0, n0, n1));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/opcodes-unittest.cc b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
new file mode 100644
index 0000000000..ca79e8ac8b
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
@@ -0,0 +1,122 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/opcodes.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+bool IsCommonOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ COMMON_OP_LIST(OPCODE)
+ CONTROL_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+bool IsControlOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ CONTROL_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+bool IsJsOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ JS_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+bool IsConstantOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ CONSTANT_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+const IrOpcode::Value kInvalidOpcode = static_cast<IrOpcode::Value>(123456789);
+
+} // namespace
+
+
+TEST(IrOpcodeTest, IsCommonOpcode) {
+ EXPECT_FALSE(IrOpcode::IsCommonOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsCommonOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsCommonOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, IsControlOpcode) {
+ EXPECT_FALSE(IrOpcode::IsControlOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsControlOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsControlOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, IsJsOpcode) {
+ EXPECT_FALSE(IrOpcode::IsJsOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsJsOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsJsOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, IsConstantOpcode) {
+ EXPECT_FALSE(IrOpcode::IsConstantOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsConstantOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsConstantOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, Mnemonic) {
+ EXPECT_STREQ("UnknownOpcode", IrOpcode::Mnemonic(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_STREQ(#Opcode, IrOpcode::Mnemonic(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
new file mode 100644
index 0000000000..5fe72eec40
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
index 12dedbdd73..c82cc3733e 100644
--- a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
@@ -42,9 +42,9 @@ TEST_F(RegisterAllocatorTest, SimpleLoop) {
StartLoop(1);
StartBlock();
- auto phi = Phi(i_reg);
+ auto phi = Phi(i_reg, 2);
auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
- Extend(phi, ipp);
+ SetInput(phi, 1, ipp);
EndBlock(Jump(0));
EndLoop();
@@ -206,14 +206,14 @@ TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
StartBlock();
for (size_t i = 0; i < arraysize(parameters); ++i) {
- phis[i] = Phi(parameters[i]);
+ phis[i] = Phi(parameters[i], 2);
}
// Perform some computations.
// something like phi[i] += const
for (size_t i = 0; i < arraysize(parameters); ++i) {
auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
- Extend(phis[i], result);
+ SetInput(phis[i], 1, result);
}
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
@@ -432,6 +432,40 @@ TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
}
+TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
+ StartBlock();
+ // Fill registers.
+ VReg values[kDefaultNRegs];
+ for (size_t i = arraysize(values); i > 0; --i) {
+ values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
+ }
+ auto c = DefineConstant();
+ auto to_spill = Define(Reg());
+ EndBlock(Jump(1));
+
+ {
+ StartLoop(1);
+
+ StartBlock();
+ // Create a use for c in second half of prev block's last gap
+ Phi(c);
+ for (size_t i = arraysize(values); i > 0; --i) {
+ Phi(values[i - 1]);
+ }
+ EndBlock(Jump(1));
+
+ EndLoop();
+ }
+
+ StartBlock();
+ // Force c to split within to_spill's definition.
+ EmitI(Reg(c));
+ EmitI(Reg(to_spill));
+ EndBlock(Last());
+
+ Allocate();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/schedule-unittest.cc b/deps/v8/test/unittests/compiler/schedule-unittest.cc
new file mode 100644
index 0000000000..70fd4d50ad
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/schedule-unittest.cc
@@ -0,0 +1,218 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::ElementsAre;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef TestWithIsolateAndZone BasicBlockTest;
+
+
+TEST_F(BasicBlockTest, Constructor) {
+ int const id = random_number_generator()->NextInt();
+ BasicBlock b(zone(), BasicBlock::Id::FromInt(id));
+ EXPECT_FALSE(b.deferred());
+ EXPECT_GT(0, b.dominator_depth());
+ EXPECT_EQ(nullptr, b.dominator());
+ EXPECT_EQ(nullptr, b.rpo_next());
+ EXPECT_EQ(id, b.id().ToInt());
+}
+
+
+TEST_F(BasicBlockTest, GetCommonDominator1) {
+ BasicBlock b(zone(), BasicBlock::Id::FromInt(0));
+ EXPECT_EQ(&b, BasicBlock::GetCommonDominator(&b, &b));
+}
+
+
+TEST_F(BasicBlockTest, GetCommonDominator2) {
+ BasicBlock b0(zone(), BasicBlock::Id::FromInt(0));
+ BasicBlock b1(zone(), BasicBlock::Id::FromInt(1));
+ BasicBlock b2(zone(), BasicBlock::Id::FromInt(2));
+ b0.set_dominator_depth(0);
+ b1.set_dominator(&b0);
+ b1.set_dominator_depth(1);
+ b2.set_dominator(&b1);
+ b2.set_dominator_depth(2);
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b0, &b1));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b0, &b2));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b1, &b0));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b2, &b0));
+ EXPECT_EQ(&b1, BasicBlock::GetCommonDominator(&b1, &b2));
+ EXPECT_EQ(&b1, BasicBlock::GetCommonDominator(&b2, &b1));
+}
+
+
+TEST_F(BasicBlockTest, GetCommonDominator3) {
+ BasicBlock b0(zone(), BasicBlock::Id::FromInt(0));
+ BasicBlock b1(zone(), BasicBlock::Id::FromInt(1));
+ BasicBlock b2(zone(), BasicBlock::Id::FromInt(2));
+ BasicBlock b3(zone(), BasicBlock::Id::FromInt(3));
+ b0.set_dominator_depth(0);
+ b1.set_dominator(&b0);
+ b1.set_dominator_depth(1);
+ b2.set_dominator(&b0);
+ b2.set_dominator_depth(1);
+ b3.set_dominator(&b2);
+ b3.set_dominator_depth(2);
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b1, &b3));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b3, &b1));
+}
+
+
+typedef TestWithZone ScheduleTest;
+
+
+namespace {
+
+const Operator kBranchOperator(IrOpcode::kBranch, Operator::kNoProperties,
+ "Branch", 0, 0, 0, 0, 0, 0);
+const Operator kDummyOperator(IrOpcode::kParameter, Operator::kNoProperties,
+ "Dummy", 0, 0, 0, 0, 0, 0);
+
+} // namespace
+
+
+TEST_F(ScheduleTest, Constructor) {
+ Schedule schedule(zone());
+ EXPECT_NE(nullptr, schedule.start());
+ EXPECT_EQ(schedule.start(),
+ schedule.GetBlockById(BasicBlock::Id::FromInt(0)));
+ EXPECT_NE(nullptr, schedule.end());
+ EXPECT_EQ(schedule.end(), schedule.GetBlockById(BasicBlock::Id::FromInt(1)));
+ EXPECT_NE(schedule.start(), schedule.end());
+}
+
+
+TEST_F(ScheduleTest, AddNode) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+
+ Node* node0 = Node::New(zone(), 0, &kDummyOperator, 0, nullptr, false);
+ EXPECT_EQ(nullptr, schedule.block(node0));
+ schedule.AddNode(start, node0);
+ EXPECT_EQ(start, schedule.block(node0));
+ EXPECT_THAT(*start, ElementsAre(node0));
+
+ Node* node1 = Node::New(zone(), 1, &kDummyOperator, 0, nullptr, false);
+ EXPECT_EQ(nullptr, schedule.block(node1));
+ schedule.AddNode(start, node1);
+ EXPECT_EQ(start, schedule.block(node1));
+ EXPECT_THAT(*start, ElementsAre(node0, node1));
+
+ EXPECT_TRUE(schedule.SameBasicBlock(node0, node1));
+}
+
+
+TEST_F(ScheduleTest, AddGoto) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+ BasicBlock* end = schedule.end();
+
+ BasicBlock* block = schedule.NewBasicBlock();
+ schedule.AddGoto(start, block);
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(1u, start->SuccessorCount());
+ EXPECT_EQ(block, start->SuccessorAt(0));
+ EXPECT_THAT(start->successors(), ElementsAre(block));
+
+ EXPECT_EQ(1u, block->PredecessorCount());
+ EXPECT_EQ(0u, block->SuccessorCount());
+ EXPECT_EQ(start, block->PredecessorAt(0));
+ EXPECT_THAT(block->predecessors(), ElementsAre(start));
+
+ EXPECT_EQ(0u, end->PredecessorCount());
+ EXPECT_EQ(0u, end->SuccessorCount());
+}
+
+
+TEST_F(ScheduleTest, AddBranch) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+
+ Node* branch = Node::New(zone(), 0, &kBranchOperator, 0, nullptr, false);
+ BasicBlock* tblock = schedule.NewBasicBlock();
+ BasicBlock* fblock = schedule.NewBasicBlock();
+ schedule.AddBranch(start, branch, tblock, fblock);
+
+ EXPECT_EQ(start, schedule.block(branch));
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(2u, start->SuccessorCount());
+ EXPECT_EQ(tblock, start->SuccessorAt(0));
+ EXPECT_EQ(fblock, start->SuccessorAt(1));
+ EXPECT_THAT(start->successors(), ElementsAre(tblock, fblock));
+
+ EXPECT_EQ(1u, tblock->PredecessorCount());
+ EXPECT_EQ(0u, tblock->SuccessorCount());
+ EXPECT_EQ(start, tblock->PredecessorAt(0));
+ EXPECT_THAT(tblock->predecessors(), ElementsAre(start));
+
+ EXPECT_EQ(1u, fblock->PredecessorCount());
+ EXPECT_EQ(0u, fblock->SuccessorCount());
+ EXPECT_EQ(start, fblock->PredecessorAt(0));
+ EXPECT_THAT(fblock->predecessors(), ElementsAre(start));
+}
+
+
+TEST_F(ScheduleTest, AddReturn) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+ BasicBlock* end = schedule.end();
+
+ Node* node = Node::New(zone(), 0, &kDummyOperator, 0, nullptr, false);
+ schedule.AddReturn(start, node);
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(1u, start->SuccessorCount());
+ EXPECT_EQ(end, start->SuccessorAt(0));
+ EXPECT_THAT(start->successors(), ElementsAre(end));
+}
+
+
+TEST_F(ScheduleTest, InsertBranch) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+ BasicBlock* end = schedule.end();
+
+ Node* node = Node::New(zone(), 0, &kDummyOperator, 0, nullptr, false);
+ Node* branch = Node::New(zone(), 0, &kBranchOperator, 0, nullptr, false);
+ BasicBlock* tblock = schedule.NewBasicBlock();
+ BasicBlock* fblock = schedule.NewBasicBlock();
+ BasicBlock* mblock = schedule.NewBasicBlock();
+
+ schedule.AddReturn(start, node);
+ schedule.AddGoto(tblock, mblock);
+ schedule.AddGoto(fblock, mblock);
+ schedule.InsertBranch(start, mblock, branch, tblock, fblock);
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(2u, start->SuccessorCount());
+ EXPECT_EQ(tblock, start->SuccessorAt(0));
+ EXPECT_EQ(fblock, start->SuccessorAt(1));
+ EXPECT_THAT(start->successors(), ElementsAre(tblock, fblock));
+
+ EXPECT_EQ(2u, mblock->PredecessorCount());
+ EXPECT_EQ(1u, mblock->SuccessorCount());
+ EXPECT_EQ(end, mblock->SuccessorAt(0));
+ EXPECT_THAT(mblock->predecessors(), ElementsAre(tblock, fblock));
+ EXPECT_THAT(mblock->successors(), ElementsAre(end));
+
+ EXPECT_EQ(1u, end->PredecessorCount());
+ EXPECT_EQ(0u, end->SuccessorCount());
+ EXPECT_EQ(mblock, end->PredecessorAt(0));
+ EXPECT_THAT(end->predecessors(), ElementsAre(mblock));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
new file mode 100644
index 0000000000..860d5cd325
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -0,0 +1,2018 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/verifier.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SchedulerTest : public TestWithZone {
+ public:
+ SchedulerTest()
+ : graph_(zone()), common_(zone()), simplified_(zone()), js_(zone()) {}
+
+ static Schedule* ComputeAndVerifySchedule(int expected, Graph* graph) {
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ os << AsDOT(*graph);
+ }
+
+ Schedule* schedule = Scheduler::ComputeSchedule(graph->zone(), graph,
+ Scheduler::kSplitNodes);
+
+ if (FLAG_trace_turbo_scheduler) {
+ OFStream os(stdout);
+ os << *schedule << std::endl;
+ }
+ ScheduleVerifier::Run(schedule);
+ CHECK_EQ(expected, GetScheduledNodeCount(schedule));
+ return schedule;
+ }
+
+ static int GetScheduledNodeCount(const Schedule* schedule) {
+ size_t node_count = 0;
+ for (auto block : *schedule->rpo_order()) {
+ node_count += block->NodeCount();
+ if (block->control() != BasicBlock::kNone) ++node_count;
+ }
+ return static_cast<int>(node_count);
+ }
+
+ Graph* graph() { return &graph_; }
+ CommonOperatorBuilder* common() { return &common_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ JSOperatorBuilder* js() { return &js_; }
+
+ private:
+ Graph graph_;
+ CommonOperatorBuilder common_;
+ SimplifiedOperatorBuilder simplified_;
+ JSOperatorBuilder js_;
+};
+
+
+class SchedulerRPOTest : public SchedulerTest {
+ public:
+ SchedulerRPOTest() {}
+
+ // TODO(titzer): pull RPO tests out to their own file.
+ static void CheckRPONumbers(BasicBlockVector* order, size_t expected,
+ bool loops_allowed) {
+ CHECK(expected == order->size());
+ for (int i = 0; i < static_cast<int>(order->size()); i++) {
+ CHECK(order->at(i)->rpo_number() == i);
+ if (!loops_allowed) {
+ CHECK(!order->at(i)->loop_end());
+ CHECK(!order->at(i)->loop_header());
+ }
+ }
+ }
+
+ static void CheckLoop(BasicBlockVector* order, BasicBlock** blocks,
+ int body_size) {
+ BasicBlock* header = blocks[0];
+ BasicBlock* end = header->loop_end();
+ CHECK(end);
+ CHECK_GT(end->rpo_number(), 0);
+ CHECK_EQ(body_size, end->rpo_number() - header->rpo_number());
+ for (int i = 0; i < body_size; i++) {
+ CHECK_GE(blocks[i]->rpo_number(), header->rpo_number());
+ CHECK_LT(blocks[i]->rpo_number(), end->rpo_number());
+ CHECK(header->LoopContains(blocks[i]));
+ CHECK(header->IsLoopHeader() || blocks[i]->loop_header() == header);
+ }
+ if (header->rpo_number() > 0) {
+ CHECK_NE(order->at(header->rpo_number() - 1)->loop_header(), header);
+ }
+ if (end->rpo_number() < static_cast<int>(order->size())) {
+ CHECK_NE(order->at(end->rpo_number())->loop_header(), header);
+ }
+ }
+
+ struct TestLoop {
+ int count;
+ BasicBlock** nodes;
+ BasicBlock* header() { return nodes[0]; }
+ BasicBlock* last() { return nodes[count - 1]; }
+ ~TestLoop() { delete[] nodes; }
+
+ void Check(BasicBlockVector* order) { CheckLoop(order, nodes, count); }
+ };
+
+ static TestLoop* CreateLoop(Schedule* schedule, int count) {
+ TestLoop* loop = new TestLoop();
+ loop->count = count;
+ loop->nodes = new BasicBlock* [count];
+ for (int i = 0; i < count; i++) {
+ loop->nodes[i] = schedule->NewBasicBlock();
+ if (i > 0) {
+ schedule->AddSuccessorForTesting(loop->nodes[i - 1], loop->nodes[i]);
+ }
+ }
+ schedule->AddSuccessorForTesting(loop->nodes[count - 1], loop->nodes[0]);
+ return loop;
+ }
+};
+
+
+class SchedulerTestWithIsolate : public SchedulerTest, public TestWithIsolate {
+ public:
+ SchedulerTestWithIsolate() {}
+
+ Unique<HeapObject> GetUniqueUndefined() {
+ Handle<HeapObject> object =
+ Handle<HeapObject>(isolate()->heap()->undefined_value(), isolate());
+ return Unique<HeapObject>::CreateUninitialized(object);
+ }
+};
+
+namespace {
+
+const Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
+ 0, 1, 0, 0);
+
+} // namespace
+
+
+TEST_F(SchedulerTest, BuildScheduleEmpty) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+ graph()->SetEnd(graph()->NewNode(common()->End(), graph()->start()));
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+}
+
+
+TEST_F(SchedulerTest, BuildScheduleOneParameter) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+ Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
+ Node* ret = graph()->NewNode(common()->Return(), p1, graph()->start(),
+ graph()->start());
+
+ graph()->SetEnd(graph()->NewNode(common()->End(), ret));
+
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+}
+
+
+TEST_F(SchedulerTest, BuildScheduleIfSplit) {
+ graph()->SetStart(graph()->NewNode(common()->Start(3)));
+
+ Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
+ Node* p2 = graph()->NewNode(common()->Parameter(1), graph()->start());
+ Node* p3 = graph()->NewNode(common()->Parameter(2), graph()->start());
+ Node* p4 = graph()->NewNode(common()->Parameter(3), graph()->start());
+ Node* p5 = graph()->NewNode(common()->Parameter(4), graph()->start());
+ Node* cmp = graph()->NewNode(js()->LessThanOrEqual(), p1, p2, p3,
+ graph()->start(), graph()->start());
+ Node* branch = graph()->NewNode(common()->Branch(), cmp, graph()->start());
+ Node* true_branch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* false_branch = graph()->NewNode(common()->IfFalse(), branch);
+
+ Node* ret1 =
+ graph()->NewNode(common()->Return(), p4, graph()->start(), true_branch);
+ Node* ret2 =
+ graph()->NewNode(common()->Return(), p5, graph()->start(), false_branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), ret1, ret2);
+ graph()->SetEnd(graph()->NewNode(common()->End(), merge));
+
+ ComputeAndVerifySchedule(13, graph());
+}
+
+
+TEST_F(SchedulerRPOTest, Degenerate1) {
+ Schedule schedule(zone());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1, false);
+ CHECK_EQ(schedule.start(), order->at(0));
+}
+
+
+TEST_F(SchedulerRPOTest, Degenerate2) {
+ Schedule schedule(zone());
+
+ schedule.AddGoto(schedule.start(), schedule.end());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 2, false);
+ CHECK_EQ(schedule.start(), order->at(0));
+ CHECK_EQ(schedule.end(), order->at(1));
+}
+
+
+TEST_F(SchedulerRPOTest, Line) {
+ for (int i = 0; i < 10; i++) {
+ Schedule schedule(zone());
+
+ BasicBlock* last = schedule.start();
+ for (int j = 0; j < i; j++) {
+ BasicBlock* block = schedule.NewBasicBlock();
+ block->set_deferred(i & 1);
+ schedule.AddGoto(last, block);
+ last = block;
+ }
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1 + i, false);
+
+ for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
+ BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(i));
+ if (block->rpo_number() >= 0 && block->SuccessorCount() == 1) {
+ CHECK(block->rpo_number() + 1 == block->SuccessorAt(0)->rpo_number());
+ }
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, SelfLoop) {
+ Schedule schedule(zone());
+ schedule.AddSuccessorForTesting(schedule.start(), schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1, true);
+ BasicBlock* loop[] = {schedule.start()};
+ CheckLoop(order, loop, 1);
+}
+
+
+TEST_F(SchedulerRPOTest, EntryLoop) {
+ Schedule schedule(zone());
+ BasicBlock* body = schedule.NewBasicBlock();
+ schedule.AddSuccessorForTesting(schedule.start(), body);
+ schedule.AddSuccessorForTesting(body, schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 2, true);
+ BasicBlock* loop[] = {schedule.start(), body};
+ CheckLoop(order, loop, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, EndLoop) {
+ Schedule schedule(zone());
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 3, true);
+ loop1->Check(order);
+}
+
+
+TEST_F(SchedulerRPOTest, EndLoopNested) {
+ Schedule schedule(zone());
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 3, true);
+ loop1->Check(order);
+}
+
+
+TEST_F(SchedulerRPOTest, Diamond) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(A, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(C, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, false);
+
+ CHECK_EQ(0, A->rpo_number());
+ CHECK((B->rpo_number() == 1 && C->rpo_number() == 2) ||
+ (B->rpo_number() == 2 && C->rpo_number() == 1));
+ CHECK_EQ(3, D->rpo_number());
+}
+
+
+TEST_F(SchedulerRPOTest, Loop1) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(C, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, true);
+ BasicBlock* loop[] = {B, C};
+ CheckLoop(order, loop, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, Loop2) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(B, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, true);
+ BasicBlock* loop[] = {B, C};
+ CheckLoop(order, loop, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopN) {
+ for (int i = 0; i < 11; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.NewBasicBlock();
+ BasicBlock* G = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, B);
+ schedule.AddSuccessorForTesting(B, G);
+
+ // Throw in extra backedges from time to time.
+ if (i == 1) schedule.AddSuccessorForTesting(B, B);
+ if (i == 2) schedule.AddSuccessorForTesting(C, B);
+ if (i == 3) schedule.AddSuccessorForTesting(D, B);
+ if (i == 4) schedule.AddSuccessorForTesting(E, B);
+ if (i == 5) schedule.AddSuccessorForTesting(F, B);
+
+ // Throw in extra loop exits from time to time.
+ if (i == 6) schedule.AddSuccessorForTesting(B, G);
+ if (i == 7) schedule.AddSuccessorForTesting(C, G);
+ if (i == 8) schedule.AddSuccessorForTesting(D, G);
+ if (i == 9) schedule.AddSuccessorForTesting(E, G);
+ if (i == 10) schedule.AddSuccessorForTesting(F, G);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 7, true);
+ BasicBlock* loop[] = {B, C, D, E, F};
+ CheckLoop(order, loop, 5);
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopNest1) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, C);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, B);
+ schedule.AddSuccessorForTesting(E, F);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 6, true);
+ BasicBlock* loop1[] = {B, C, D, E};
+ CheckLoop(order, loop1, 4);
+
+ BasicBlock* loop2[] = {C, D};
+ CheckLoop(order, loop2, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopNest2) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.NewBasicBlock();
+ BasicBlock* G = schedule.NewBasicBlock();
+ BasicBlock* H = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, G);
+ schedule.AddSuccessorForTesting(G, H);
+
+ schedule.AddSuccessorForTesting(E, D);
+ schedule.AddSuccessorForTesting(F, C);
+ schedule.AddSuccessorForTesting(G, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 8, true);
+ BasicBlock* loop1[] = {B, C, D, E, F, G};
+ CheckLoop(order, loop1, 6);
+
+ BasicBlock* loop2[] = {C, D, E, F};
+ CheckLoop(order, loop2, 4);
+
+ BasicBlock* loop3[] = {D, E};
+ CheckLoop(order, loop3, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopFollow1) {
+ Schedule schedule(zone());
+
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
+ static_cast<int>(order->size()));
+
+ loop1->Check(order);
+ loop2->Check(order);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopFollow2) {
+ Schedule schedule(zone());
+
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* S = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), S);
+ schedule.AddSuccessorForTesting(S, loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
+ static_cast<int>(order->size()));
+ loop1->Check(order);
+ loop2->Check(order);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopFollowN) {
+ for (int size = 1; size < 5; size++) {
+ for (int exit = 0; exit < size; exit++) {
+ Schedule schedule(zone());
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[exit], loop2->header());
+ schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
+ static_cast<int>(order->size()));
+ loop1->Check(order);
+ loop2->Check(order);
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
+ Schedule schedule(zone());
+
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), C);
+ schedule.AddSuccessorForTesting(C, E);
+ schedule.AddSuccessorForTesting(C, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
+ static_cast<int>(order->size()));
+ loop1->Check(order);
+ loop2->Check(order);
+
+ BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
+ CheckLoop(order, loop3, 4);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopBackedges1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ for (int j = 0; j < size; j++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ loop1->Check(order);
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopOutedges1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ for (int j = 0; j < size; j++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], D);
+ schedule.AddSuccessorForTesting(D, E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ loop1->Check(order);
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopOutedges2) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ for (int j = 0; j < size; j++) {
+ BasicBlock* O = schedule.NewBasicBlock();
+ schedule.AddSuccessorForTesting(loop1->nodes[j], O);
+ schedule.AddSuccessorForTesting(O, E);
+ }
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ loop1->Check(order);
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopOutloops1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+ SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ TestLoop** loopN = new TestLoop* [size];
+ for (int j = 0; j < size; j++) {
+ loopN[j] = CreateLoop(&schedule, 2);
+ schedule.AddSuccessorForTesting(loop1->nodes[j], loopN[j]->header());
+ schedule.AddSuccessorForTesting(loopN[j]->last(), E);
+ }
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ loop1->Check(order);
+
+ for (int j = 0; j < size; j++) {
+ loopN[j]->Check(order);
+ delete loopN[j];
+ }
+ delete[] loopN;
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopMultibackedge) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(B, E);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(D, B);
+ schedule.AddSuccessorForTesting(E, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 5, true);
+
+ BasicBlock* loop1[] = {B, C, D, E};
+ CheckLoop(order, loop1, 4);
+}
+
+
+TEST_F(SchedulerTestWithIsolate, BuildScheduleIfSplitWithEffects) {
+ const Operator* op;
+
+ // Manually transcripted code for:
+ // function turbo_fan_test(a, b, c, y) {
+ // if (a < b) {
+ // return a + b - c * c - a + y;
+ // } else {
+ // return c * c - a;
+ // }
+ // }
+ op = common()->Start(0);
+ Node* n0 = graph()->NewNode(op);
+ USE(n0);
+ Node* nil = graph()->NewNode(common()->Dead());
+ op = common()->End();
+ Node* n23 = graph()->NewNode(op, nil);
+ USE(n23);
+ op = common()->Merge(2);
+ Node* n22 = graph()->NewNode(op, nil, nil);
+ USE(n22);
+ op = common()->Return();
+ Node* n16 = graph()->NewNode(op, nil, nil, nil);
+ USE(n16);
+ op = js()->Add();
+ Node* n15 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n15);
+ op = js()->Subtract();
+ Node* n14 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n14);
+ op = js()->Subtract();
+ Node* n13 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n13);
+ op = js()->Add();
+ Node* n11 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n11);
+ op = common()->Parameter(0);
+ Node* n2 = graph()->NewNode(op, n0);
+ USE(n2);
+ n11->ReplaceInput(0, n2);
+ op = common()->Parameter(0);
+ Node* n3 = graph()->NewNode(op, n0);
+ USE(n3);
+ n11->ReplaceInput(1, n3);
+ op = common()->HeapConstant(GetUniqueUndefined());
+ Node* n7 = graph()->NewNode(op);
+ USE(n7);
+ n11->ReplaceInput(2, n7);
+ op = js()->LessThan();
+ Node* n8 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n8);
+ n8->ReplaceInput(0, n2);
+ n8->ReplaceInput(1, n3);
+ n8->ReplaceInput(2, n7);
+ n8->ReplaceInput(3, n0);
+ n8->ReplaceInput(4, n0);
+ n11->ReplaceInput(3, n8);
+ op = common()->IfTrue();
+ Node* n10 = graph()->NewNode(op, nil);
+ USE(n10);
+ op = common()->Branch();
+ Node* n9 = graph()->NewNode(op, nil, nil);
+ USE(n9);
+ n9->ReplaceInput(0, n8);
+ n9->ReplaceInput(1, n0);
+ n10->ReplaceInput(0, n9);
+ n11->ReplaceInput(4, n10);
+ n13->ReplaceInput(0, n11);
+ op = js()->Multiply();
+ Node* n12 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n12);
+ op = common()->Parameter(0);
+ Node* n4 = graph()->NewNode(op, n0);
+ USE(n4);
+ n12->ReplaceInput(0, n4);
+ n12->ReplaceInput(1, n4);
+ n12->ReplaceInput(2, n7);
+ n12->ReplaceInput(3, n11);
+ n12->ReplaceInput(4, n10);
+ n13->ReplaceInput(1, n12);
+ n13->ReplaceInput(2, n7);
+ n13->ReplaceInput(3, n12);
+ n13->ReplaceInput(4, n10);
+ n14->ReplaceInput(0, n13);
+ n14->ReplaceInput(1, n2);
+ n14->ReplaceInput(2, n7);
+ n14->ReplaceInput(3, n13);
+ n14->ReplaceInput(4, n10);
+ n15->ReplaceInput(0, n14);
+ op = common()->Parameter(0);
+ Node* n5 = graph()->NewNode(op, n0);
+ USE(n5);
+ n15->ReplaceInput(1, n5);
+ n15->ReplaceInput(2, n7);
+ n15->ReplaceInput(3, n14);
+ n15->ReplaceInput(4, n10);
+ n16->ReplaceInput(0, n15);
+ n16->ReplaceInput(1, n15);
+ n16->ReplaceInput(2, n10);
+ n22->ReplaceInput(0, n16);
+ op = common()->Return();
+ Node* n21 = graph()->NewNode(op, nil, nil, nil);
+ USE(n21);
+ op = js()->Subtract();
+ Node* n20 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n20);
+ op = js()->Multiply();
+ Node* n19 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n19);
+ n19->ReplaceInput(0, n4);
+ n19->ReplaceInput(1, n4);
+ n19->ReplaceInput(2, n7);
+ n19->ReplaceInput(3, n8);
+ op = common()->IfFalse();
+ Node* n18 = graph()->NewNode(op, nil);
+ USE(n18);
+ n18->ReplaceInput(0, n9);
+ n19->ReplaceInput(4, n18);
+ n20->ReplaceInput(0, n19);
+ n20->ReplaceInput(1, n2);
+ n20->ReplaceInput(2, n7);
+ n20->ReplaceInput(3, n19);
+ n20->ReplaceInput(4, n18);
+ n21->ReplaceInput(0, n20);
+ n21->ReplaceInput(1, n20);
+ n21->ReplaceInput(2, n18);
+ n22->ReplaceInput(1, n21);
+ n23->ReplaceInput(0, n22);
+
+ graph()->SetStart(n0);
+ graph()->SetEnd(n23);
+
+ ComputeAndVerifySchedule(20, graph());
+}
+
+
+TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoop) {
+ const Operator* op;
+
+ // Manually transcripted code for:
+ // function turbo_fan_test(a, b) {
+ // while (a < b) {
+ // a++;
+ // }
+ // return a;
+ // }
+ op = common()->Start(0);
+ Node* n0 = graph()->NewNode(op);
+ USE(n0);
+ Node* nil = graph()->NewNode(common()->Dead());
+ op = common()->End();
+ Node* n20 = graph()->NewNode(op, nil);
+ USE(n20);
+ op = common()->Return();
+ Node* n19 = graph()->NewNode(op, nil, nil, nil);
+ USE(n19);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n8 = graph()->NewNode(op, nil, nil, nil);
+ USE(n8);
+ op = common()->Parameter(0);
+ Node* n2 = graph()->NewNode(op, n0);
+ USE(n2);
+ n8->ReplaceInput(0, n2);
+ op = js()->Add();
+ Node* n18 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n18);
+ op = js()->ToNumber();
+ Node* n16 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n16);
+ n16->ReplaceInput(0, n8);
+ op = common()->HeapConstant(GetUniqueUndefined());
+ Node* n5 = graph()->NewNode(op);
+ USE(n5);
+ n16->ReplaceInput(1, n5);
+ op = js()->LessThan();
+ Node* n12 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n12);
+ n12->ReplaceInput(0, n8);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n9 = graph()->NewNode(op, nil, nil, nil);
+ USE(n9);
+ op = common()->Parameter(0);
+ Node* n3 = graph()->NewNode(op, n0);
+ USE(n3);
+ n9->ReplaceInput(0, n3);
+ n9->ReplaceInput(1, n9);
+ op = common()->Loop(2);
+ Node* n6 = graph()->NewNode(op, nil, nil);
+ USE(n6);
+ n6->ReplaceInput(0, n0);
+ op = common()->IfTrue();
+ Node* n14 = graph()->NewNode(op, nil);
+ USE(n14);
+ op = common()->Branch();
+ Node* n13 = graph()->NewNode(op, nil, nil);
+ USE(n13);
+ n13->ReplaceInput(0, n12);
+ n13->ReplaceInput(1, n6);
+ n14->ReplaceInput(0, n13);
+ n6->ReplaceInput(1, n14);
+ n9->ReplaceInput(2, n6);
+ n12->ReplaceInput(1, n9);
+ n12->ReplaceInput(2, n5);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n10 = graph()->NewNode(op, nil, nil, nil);
+ USE(n10);
+ n10->ReplaceInput(0, n0);
+ n10->ReplaceInput(1, n18);
+ n10->ReplaceInput(2, n6);
+ n12->ReplaceInput(3, n10);
+ n12->ReplaceInput(4, n6);
+ n16->ReplaceInput(2, n12);
+ n16->ReplaceInput(3, n14);
+ n18->ReplaceInput(0, n16);
+ op = common()->NumberConstant(0);
+ Node* n17 = graph()->NewNode(op);
+ USE(n17);
+ n18->ReplaceInput(1, n17);
+ n18->ReplaceInput(2, n5);
+ n18->ReplaceInput(3, n16);
+ n18->ReplaceInput(4, n14);
+ n8->ReplaceInput(1, n18);
+ n8->ReplaceInput(2, n6);
+ n19->ReplaceInput(0, n8);
+ n19->ReplaceInput(1, n12);
+ op = common()->IfFalse();
+ Node* n15 = graph()->NewNode(op, nil);
+ USE(n15);
+ n15->ReplaceInput(0, n13);
+ n19->ReplaceInput(2, n15);
+ n20->ReplaceInput(0, n19);
+
+ graph()->SetStart(n0);
+ graph()->SetEnd(n20);
+
+ ComputeAndVerifySchedule(19, graph());
+}
+
+
+TEST_F(SchedulerTestWithIsolate, BuildScheduleComplexLoops) {
+ const Operator* op;
+
+ // Manually transcripted code for:
+ // function turbo_fan_test(a, b, c) {
+ // while (a < b) {
+ // a++;
+ // while (c < b) {
+ // c++;
+ // }
+ // }
+ // while (a < b) {
+ // a += 2;
+ // }
+ // return a;
+ // }
+ op = common()->Start(0);
+ Node* n0 = graph()->NewNode(op);
+ USE(n0);
+ Node* nil = graph()->NewNode(common()->Dead());
+ op = common()->End();
+ Node* n46 = graph()->NewNode(op, nil);
+ USE(n46);
+ op = common()->Return();
+ Node* n45 = graph()->NewNode(op, nil, nil, nil);
+ USE(n45);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n35 = graph()->NewNode(op, nil, nil, nil);
+ USE(n35);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n9 = graph()->NewNode(op, nil, nil, nil);
+ USE(n9);
+ op = common()->Parameter(0);
+ Node* n2 = graph()->NewNode(op, n0);
+ USE(n2);
+ n9->ReplaceInput(0, n2);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n23 = graph()->NewNode(op, nil, nil, nil);
+ USE(n23);
+ op = js()->Add();
+ Node* n20 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n20);
+ op = js()->ToNumber();
+ Node* n18 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n18);
+ n18->ReplaceInput(0, n9);
+ op = common()->HeapConstant(GetUniqueUndefined());
+ Node* n6 = graph()->NewNode(op);
+ USE(n6);
+ n18->ReplaceInput(1, n6);
+ op = js()->LessThan();
+ Node* n14 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n14);
+ n14->ReplaceInput(0, n9);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n10 = graph()->NewNode(op, nil, nil, nil);
+ USE(n10);
+ op = common()->Parameter(0);
+ Node* n3 = graph()->NewNode(op, n0);
+ USE(n3);
+ n10->ReplaceInput(0, n3);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n24 = graph()->NewNode(op, nil, nil, nil);
+ USE(n24);
+ n24->ReplaceInput(0, n10);
+ n24->ReplaceInput(1, n24);
+ op = common()->Loop(2);
+ Node* n21 = graph()->NewNode(op, nil, nil);
+ USE(n21);
+ op = common()->IfTrue();
+ Node* n16 = graph()->NewNode(op, nil);
+ USE(n16);
+ op = common()->Branch();
+ Node* n15 = graph()->NewNode(op, nil, nil);
+ USE(n15);
+ n15->ReplaceInput(0, n14);
+ op = common()->Loop(2);
+ Node* n7 = graph()->NewNode(op, nil, nil);
+ USE(n7);
+ n7->ReplaceInput(0, n0);
+ op = common()->IfFalse();
+ Node* n30 = graph()->NewNode(op, nil);
+ USE(n30);
+ op = common()->Branch();
+ Node* n28 = graph()->NewNode(op, nil, nil);
+ USE(n28);
+ op = js()->LessThan();
+ Node* n27 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n27);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n25 = graph()->NewNode(op, nil, nil, nil);
+ USE(n25);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n11 = graph()->NewNode(op, nil, nil, nil);
+ USE(n11);
+ op = common()->Parameter(0);
+ Node* n4 = graph()->NewNode(op, n0);
+ USE(n4);
+ n11->ReplaceInput(0, n4);
+ n11->ReplaceInput(1, n25);
+ n11->ReplaceInput(2, n7);
+ n25->ReplaceInput(0, n11);
+ op = js()->Add();
+ Node* n32 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n32);
+ op = js()->ToNumber();
+ Node* n31 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n31);
+ n31->ReplaceInput(0, n25);
+ n31->ReplaceInput(1, n6);
+ n31->ReplaceInput(2, n27);
+ op = common()->IfTrue();
+ Node* n29 = graph()->NewNode(op, nil);
+ USE(n29);
+ n29->ReplaceInput(0, n28);
+ n31->ReplaceInput(3, n29);
+ n32->ReplaceInput(0, n31);
+ op = common()->NumberConstant(0);
+ Node* n19 = graph()->NewNode(op);
+ USE(n19);
+ n32->ReplaceInput(1, n19);
+ n32->ReplaceInput(2, n6);
+ n32->ReplaceInput(3, n31);
+ n32->ReplaceInput(4, n29);
+ n25->ReplaceInput(1, n32);
+ n25->ReplaceInput(2, n21);
+ n27->ReplaceInput(0, n25);
+ n27->ReplaceInput(1, n24);
+ n27->ReplaceInput(2, n6);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n26 = graph()->NewNode(op, nil, nil, nil);
+ USE(n26);
+ n26->ReplaceInput(0, n20);
+ n26->ReplaceInput(1, n32);
+ n26->ReplaceInput(2, n21);
+ n27->ReplaceInput(3, n26);
+ n27->ReplaceInput(4, n21);
+ n28->ReplaceInput(0, n27);
+ n28->ReplaceInput(1, n21);
+ n30->ReplaceInput(0, n28);
+ n7->ReplaceInput(1, n30);
+ n15->ReplaceInput(1, n7);
+ n16->ReplaceInput(0, n15);
+ n21->ReplaceInput(0, n16);
+ n21->ReplaceInput(1, n29);
+ n24->ReplaceInput(2, n21);
+ n10->ReplaceInput(1, n24);
+ n10->ReplaceInput(2, n7);
+ n14->ReplaceInput(1, n10);
+ n14->ReplaceInput(2, n6);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n12 = graph()->NewNode(op, nil, nil, nil);
+ USE(n12);
+ n12->ReplaceInput(0, n0);
+ n12->ReplaceInput(1, n27);
+ n12->ReplaceInput(2, n7);
+ n14->ReplaceInput(3, n12);
+ n14->ReplaceInput(4, n7);
+ n18->ReplaceInput(2, n14);
+ n18->ReplaceInput(3, n16);
+ n20->ReplaceInput(0, n18);
+ n20->ReplaceInput(1, n19);
+ n20->ReplaceInput(2, n6);
+ n20->ReplaceInput(3, n18);
+ n20->ReplaceInput(4, n16);
+ n23->ReplaceInput(0, n20);
+ n23->ReplaceInput(1, n23);
+ n23->ReplaceInput(2, n21);
+ n9->ReplaceInput(1, n23);
+ n9->ReplaceInput(2, n7);
+ n35->ReplaceInput(0, n9);
+ op = js()->Add();
+ Node* n44 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n44);
+ n44->ReplaceInput(0, n35);
+ op = common()->NumberConstant(0);
+ Node* n43 = graph()->NewNode(op);
+ USE(n43);
+ n44->ReplaceInput(1, n43);
+ n44->ReplaceInput(2, n6);
+ op = js()->LessThan();
+ Node* n39 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n39);
+ n39->ReplaceInput(0, n35);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n36 = graph()->NewNode(op, nil, nil, nil);
+ USE(n36);
+ n36->ReplaceInput(0, n10);
+ n36->ReplaceInput(1, n36);
+ op = common()->Loop(2);
+ Node* n33 = graph()->NewNode(op, nil, nil);
+ USE(n33);
+ op = common()->IfFalse();
+ Node* n17 = graph()->NewNode(op, nil);
+ USE(n17);
+ n17->ReplaceInput(0, n15);
+ n33->ReplaceInput(0, n17);
+ op = common()->IfTrue();
+ Node* n41 = graph()->NewNode(op, nil);
+ USE(n41);
+ op = common()->Branch();
+ Node* n40 = graph()->NewNode(op, nil, nil);
+ USE(n40);
+ n40->ReplaceInput(0, n39);
+ n40->ReplaceInput(1, n33);
+ n41->ReplaceInput(0, n40);
+ n33->ReplaceInput(1, n41);
+ n36->ReplaceInput(2, n33);
+ n39->ReplaceInput(1, n36);
+ n39->ReplaceInput(2, n6);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n38 = graph()->NewNode(op, nil, nil, nil);
+ USE(n38);
+ n38->ReplaceInput(0, n14);
+ n38->ReplaceInput(1, n44);
+ n38->ReplaceInput(2, n33);
+ n39->ReplaceInput(3, n38);
+ n39->ReplaceInput(4, n33);
+ n44->ReplaceInput(3, n39);
+ n44->ReplaceInput(4, n41);
+ n35->ReplaceInput(1, n44);
+ n35->ReplaceInput(2, n33);
+ n45->ReplaceInput(0, n35);
+ n45->ReplaceInput(1, n39);
+ op = common()->IfFalse();
+ Node* n42 = graph()->NewNode(op, nil);
+ USE(n42);
+ n42->ReplaceInput(0, n40);
+ n45->ReplaceInput(2, n42);
+ n46->ReplaceInput(0, n45);
+
+ graph()->SetStart(n0);
+ graph()->SetEnd(n46);
+
+ ComputeAndVerifySchedule(46, graph());
+}
+
+
+TEST_F(SchedulerTestWithIsolate, BuildScheduleBreakAndContinue) {
+ const Operator* op;
+
+ // Manually transcripted code for:
+ // function turbo_fan_test(a, b, c) {
+ // var d = 0;
+ // while (a < b) {
+ // a++;
+ // while (c < b) {
+ // c++;
+ // if (d == 0) break;
+ // a++;
+ // }
+ // if (a == 1) continue;
+ // d++;
+ // }
+ // return a + d;
+ // }
+ op = common()->Start(0);
+ Node* n0 = graph()->NewNode(op);
+ USE(n0);
+ Node* nil = graph()->NewNode(common()->Dead());
+ op = common()->End();
+ Node* n58 = graph()->NewNode(op, nil);
+ USE(n58);
+ op = common()->Return();
+ Node* n57 = graph()->NewNode(op, nil, nil, nil);
+ USE(n57);
+ op = js()->Add();
+ Node* n56 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n56);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n10 = graph()->NewNode(op, nil, nil, nil);
+ USE(n10);
+ op = common()->Parameter(0);
+ Node* n2 = graph()->NewNode(op, n0);
+ USE(n2);
+ n10->ReplaceInput(0, n2);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n25 = graph()->NewNode(op, nil, nil, nil);
+ USE(n25);
+ op = js()->Add();
+ Node* n22 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n22);
+ op = js()->ToNumber();
+ Node* n20 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n20);
+ n20->ReplaceInput(0, n10);
+ op = common()->HeapConstant(GetUniqueUndefined());
+ Node* n6 = graph()->NewNode(op);
+ USE(n6);
+ n20->ReplaceInput(1, n6);
+ op = js()->LessThan();
+ Node* n16 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n16);
+ n16->ReplaceInput(0, n10);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n11 = graph()->NewNode(op, nil, nil, nil);
+ USE(n11);
+ op = common()->Parameter(0);
+ Node* n3 = graph()->NewNode(op, n0);
+ USE(n3);
+ n11->ReplaceInput(0, n3);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n26 = graph()->NewNode(op, nil, nil, nil);
+ USE(n26);
+ n26->ReplaceInput(0, n11);
+ n26->ReplaceInput(1, n26);
+ op = common()->Loop(2);
+ Node* n23 = graph()->NewNode(op, nil, nil);
+ USE(n23);
+ op = common()->IfTrue();
+ Node* n18 = graph()->NewNode(op, nil);
+ USE(n18);
+ op = common()->Branch();
+ Node* n17 = graph()->NewNode(op, nil, nil);
+ USE(n17);
+ n17->ReplaceInput(0, n16);
+ op = common()->Loop(2);
+ Node* n8 = graph()->NewNode(op, nil, nil);
+ USE(n8);
+ n8->ReplaceInput(0, n0);
+ op = common()->Merge(2);
+ Node* n53 = graph()->NewNode(op, nil, nil);
+ USE(n53);
+ op = common()->IfTrue();
+ Node* n49 = graph()->NewNode(op, nil);
+ USE(n49);
+ op = common()->Branch();
+ Node* n48 = graph()->NewNode(op, nil, nil);
+ USE(n48);
+ op = js()->Equal();
+ Node* n47 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n47);
+ n47->ReplaceInput(0, n25);
+ op = common()->NumberConstant(0);
+ Node* n46 = graph()->NewNode(op);
+ USE(n46);
+ n47->ReplaceInput(1, n46);
+ n47->ReplaceInput(2, n6);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n42 = graph()->NewNode(op, nil, nil, nil);
+ USE(n42);
+ op = js()->LessThan();
+ Node* n30 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n30);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n27 = graph()->NewNode(op, nil, nil, nil);
+ USE(n27);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n12 = graph()->NewNode(op, nil, nil, nil);
+ USE(n12);
+ op = common()->Parameter(0);
+ Node* n4 = graph()->NewNode(op, n0);
+ USE(n4);
+ n12->ReplaceInput(0, n4);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n41 = graph()->NewNode(op, nil, nil, nil);
+ USE(n41);
+ n41->ReplaceInput(0, n27);
+ op = js()->Add();
+ Node* n35 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n35);
+ op = js()->ToNumber();
+ Node* n34 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n34);
+ n34->ReplaceInput(0, n27);
+ n34->ReplaceInput(1, n6);
+ n34->ReplaceInput(2, n30);
+ op = common()->IfTrue();
+ Node* n32 = graph()->NewNode(op, nil);
+ USE(n32);
+ op = common()->Branch();
+ Node* n31 = graph()->NewNode(op, nil, nil);
+ USE(n31);
+ n31->ReplaceInput(0, n30);
+ n31->ReplaceInput(1, n23);
+ n32->ReplaceInput(0, n31);
+ n34->ReplaceInput(3, n32);
+ n35->ReplaceInput(0, n34);
+ op = common()->NumberConstant(0);
+ Node* n21 = graph()->NewNode(op);
+ USE(n21);
+ n35->ReplaceInput(1, n21);
+ n35->ReplaceInput(2, n6);
+ n35->ReplaceInput(3, n34);
+ n35->ReplaceInput(4, n32);
+ n41->ReplaceInput(1, n35);
+ op = common()->Merge(2);
+ Node* n40 = graph()->NewNode(op, nil, nil);
+ USE(n40);
+ op = common()->IfFalse();
+ Node* n33 = graph()->NewNode(op, nil);
+ USE(n33);
+ n33->ReplaceInput(0, n31);
+ n40->ReplaceInput(0, n33);
+ op = common()->IfTrue();
+ Node* n39 = graph()->NewNode(op, nil);
+ USE(n39);
+ op = common()->Branch();
+ Node* n38 = graph()->NewNode(op, nil, nil);
+ USE(n38);
+ op = js()->Equal();
+ Node* n37 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n37);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n28 = graph()->NewNode(op, nil, nil, nil);
+ USE(n28);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n13 = graph()->NewNode(op, nil, nil, nil);
+ USE(n13);
+ op = common()->NumberConstant(0);
+ Node* n7 = graph()->NewNode(op);
+ USE(n7);
+ n13->ReplaceInput(0, n7);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n54 = graph()->NewNode(op, nil, nil, nil);
+ USE(n54);
+ n54->ReplaceInput(0, n28);
+ op = js()->Add();
+ Node* n52 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n52);
+ op = js()->ToNumber();
+ Node* n51 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n51);
+ n51->ReplaceInput(0, n28);
+ n51->ReplaceInput(1, n6);
+ n51->ReplaceInput(2, n47);
+ op = common()->IfFalse();
+ Node* n50 = graph()->NewNode(op, nil);
+ USE(n50);
+ n50->ReplaceInput(0, n48);
+ n51->ReplaceInput(3, n50);
+ n52->ReplaceInput(0, n51);
+ n52->ReplaceInput(1, n21);
+ n52->ReplaceInput(2, n6);
+ n52->ReplaceInput(3, n51);
+ n52->ReplaceInput(4, n50);
+ n54->ReplaceInput(1, n52);
+ n54->ReplaceInput(2, n53);
+ n13->ReplaceInput(1, n54);
+ n13->ReplaceInput(2, n8);
+ n28->ReplaceInput(0, n13);
+ n28->ReplaceInput(1, n28);
+ n28->ReplaceInput(2, n23);
+ n37->ReplaceInput(0, n28);
+ op = common()->NumberConstant(0);
+ Node* n36 = graph()->NewNode(op);
+ USE(n36);
+ n37->ReplaceInput(1, n36);
+ n37->ReplaceInput(2, n6);
+ n37->ReplaceInput(3, n35);
+ n37->ReplaceInput(4, n32);
+ n38->ReplaceInput(0, n37);
+ n38->ReplaceInput(1, n32);
+ n39->ReplaceInput(0, n38);
+ n40->ReplaceInput(1, n39);
+ n41->ReplaceInput(2, n40);
+ n12->ReplaceInput(1, n41);
+ n12->ReplaceInput(2, n8);
+ n27->ReplaceInput(0, n12);
+ n27->ReplaceInput(1, n35);
+ n27->ReplaceInput(2, n23);
+ n30->ReplaceInput(0, n27);
+ n30->ReplaceInput(1, n26);
+ n30->ReplaceInput(2, n6);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n29 = graph()->NewNode(op, nil, nil, nil);
+ USE(n29);
+ n29->ReplaceInput(0, n22);
+ op = js()->Add();
+ Node* n45 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n45);
+ op = js()->ToNumber();
+ Node* n44 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n44);
+ n44->ReplaceInput(0, n25);
+ n44->ReplaceInput(1, n6);
+ n44->ReplaceInput(2, n37);
+ op = common()->IfFalse();
+ Node* n43 = graph()->NewNode(op, nil);
+ USE(n43);
+ n43->ReplaceInput(0, n38);
+ n44->ReplaceInput(3, n43);
+ n45->ReplaceInput(0, n44);
+ n45->ReplaceInput(1, n21);
+ n45->ReplaceInput(2, n6);
+ n45->ReplaceInput(3, n44);
+ n45->ReplaceInput(4, n43);
+ n29->ReplaceInput(1, n45);
+ n29->ReplaceInput(2, n23);
+ n30->ReplaceInput(3, n29);
+ n30->ReplaceInput(4, n23);
+ n42->ReplaceInput(0, n30);
+ n42->ReplaceInput(1, n37);
+ n42->ReplaceInput(2, n40);
+ n47->ReplaceInput(3, n42);
+ n47->ReplaceInput(4, n40);
+ n48->ReplaceInput(0, n47);
+ n48->ReplaceInput(1, n40);
+ n49->ReplaceInput(0, n48);
+ n53->ReplaceInput(0, n49);
+ n53->ReplaceInput(1, n50);
+ n8->ReplaceInput(1, n53);
+ n17->ReplaceInput(1, n8);
+ n18->ReplaceInput(0, n17);
+ n23->ReplaceInput(0, n18);
+ n23->ReplaceInput(1, n43);
+ n26->ReplaceInput(2, n23);
+ n11->ReplaceInput(1, n26);
+ n11->ReplaceInput(2, n8);
+ n16->ReplaceInput(1, n11);
+ n16->ReplaceInput(2, n6);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n14 = graph()->NewNode(op, nil, nil, nil);
+ USE(n14);
+ n14->ReplaceInput(0, n0);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n55 = graph()->NewNode(op, nil, nil, nil);
+ USE(n55);
+ n55->ReplaceInput(0, n47);
+ n55->ReplaceInput(1, n52);
+ n55->ReplaceInput(2, n53);
+ n14->ReplaceInput(1, n55);
+ n14->ReplaceInput(2, n8);
+ n16->ReplaceInput(3, n14);
+ n16->ReplaceInput(4, n8);
+ n20->ReplaceInput(2, n16);
+ n20->ReplaceInput(3, n18);
+ n22->ReplaceInput(0, n20);
+ n22->ReplaceInput(1, n21);
+ n22->ReplaceInput(2, n6);
+ n22->ReplaceInput(3, n20);
+ n22->ReplaceInput(4, n18);
+ n25->ReplaceInput(0, n22);
+ n25->ReplaceInput(1, n45);
+ n25->ReplaceInput(2, n23);
+ n10->ReplaceInput(1, n25);
+ n10->ReplaceInput(2, n8);
+ n56->ReplaceInput(0, n10);
+ n56->ReplaceInput(1, n13);
+ n56->ReplaceInput(2, n6);
+ n56->ReplaceInput(3, n16);
+ op = common()->IfFalse();
+ Node* n19 = graph()->NewNode(op, nil);
+ USE(n19);
+ n19->ReplaceInput(0, n17);
+ n56->ReplaceInput(4, n19);
+ n57->ReplaceInput(0, n56);
+ n57->ReplaceInput(1, n56);
+ n57->ReplaceInput(2, n19);
+ n58->ReplaceInput(0, n57);
+
+ graph()->SetStart(n0);
+ graph()->SetEnd(n58);
+
+ ComputeAndVerifySchedule(62, graph());
+}
+
+
+TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoopWithCodeMotion) {
+ const Operator* op;
+
+ // Manually transcripted code for:
+ // function turbo_fan_test(a, b, c) {
+ // while (a < b) {
+ // a += b + c;
+ // }
+ // return a;
+ // }
+ op = common()->Start(0);
+ Node* n0 = graph()->NewNode(op);
+ USE(n0);
+ Node* nil = graph()->NewNode(common()->Dead());
+ op = common()->End();
+ Node* n22 = graph()->NewNode(op, nil);
+ USE(n22);
+ op = common()->Return();
+ Node* n21 = graph()->NewNode(op, nil, nil, nil);
+ USE(n21);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n9 = graph()->NewNode(op, nil, nil, nil);
+ USE(n9);
+ op = common()->Parameter(0);
+ Node* n2 = graph()->NewNode(op, n0);
+ USE(n2);
+ n9->ReplaceInput(0, n2);
+ op = js()->Add();
+ Node* n20 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n20);
+ n20->ReplaceInput(0, n9);
+ op = &kIntAdd;
+ Node* n19 = graph()->NewNode(op, nil, nil);
+ USE(n19);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n10 = graph()->NewNode(op, nil, nil, nil);
+ USE(n10);
+ op = common()->Parameter(0);
+ Node* n3 = graph()->NewNode(op, n0);
+ USE(n3);
+ n10->ReplaceInput(0, n3);
+ n10->ReplaceInput(1, n10);
+ op = common()->Loop(2);
+ Node* n7 = graph()->NewNode(op, nil, nil);
+ USE(n7);
+ n7->ReplaceInput(0, n0);
+ op = common()->IfTrue();
+ Node* n17 = graph()->NewNode(op, nil);
+ USE(n17);
+ op = common()->Branch();
+ Node* n16 = graph()->NewNode(op, nil, nil);
+ USE(n16);
+ op = js()->ToBoolean();
+ Node* n15 = graph()->NewNode(op, nil, nil, nil, nil);
+ USE(n15);
+ op = js()->LessThan();
+ Node* n14 = graph()->NewNode(op, nil, nil, nil, nil, nil);
+ USE(n14);
+ n14->ReplaceInput(0, n9);
+ n14->ReplaceInput(1, n10);
+ op = common()->HeapConstant(GetUniqueUndefined());
+ Node* n6 = graph()->NewNode(op);
+ USE(n6);
+ n14->ReplaceInput(2, n6);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n12 = graph()->NewNode(op, nil, nil, nil);
+ USE(n12);
+ n12->ReplaceInput(0, n0);
+ n12->ReplaceInput(1, n20);
+ n12->ReplaceInput(2, n7);
+ n14->ReplaceInput(3, n12);
+ n14->ReplaceInput(4, n7);
+ n15->ReplaceInput(0, n14);
+ n15->ReplaceInput(1, n6);
+ n15->ReplaceInput(2, n14);
+ n15->ReplaceInput(3, n7);
+ n16->ReplaceInput(0, n15);
+ n16->ReplaceInput(1, n7);
+ n17->ReplaceInput(0, n16);
+ n7->ReplaceInput(1, n17);
+ n10->ReplaceInput(2, n7);
+ n19->ReplaceInput(0, n2);
+ op = common()->Phi(kMachAnyTagged, 2);
+ Node* n11 = graph()->NewNode(op, nil, nil, nil);
+ USE(n11);
+ op = common()->Parameter(0);
+ Node* n4 = graph()->NewNode(op, n0);
+ USE(n4);
+ n11->ReplaceInput(0, n4);
+ n11->ReplaceInput(1, n11);
+ n11->ReplaceInput(2, n7);
+ n19->ReplaceInput(1, n3);
+ n20->ReplaceInput(1, n19);
+ n20->ReplaceInput(2, n6);
+ n20->ReplaceInput(3, n19);
+ n20->ReplaceInput(4, n17);
+ n9->ReplaceInput(1, n20);
+ n9->ReplaceInput(2, n7);
+ n21->ReplaceInput(0, n9);
+ n21->ReplaceInput(1, n15);
+ op = common()->IfFalse();
+ Node* n18 = graph()->NewNode(op, nil);
+ USE(n18);
+ n18->ReplaceInput(0, n16);
+ n21->ReplaceInput(2, n18);
+ n22->ReplaceInput(0, n21);
+
+ graph()->SetStart(n0);
+ graph()->SetEnd(n22);
+
+ Schedule* schedule = ComputeAndVerifySchedule(19, graph());
+ // Make sure the integer-only add gets hoisted to a different block that the
+ // JSAdd.
+ CHECK(schedule->block(n19) != schedule->block(n20));
+}
+
+
+namespace {
+
+Node* CreateDiamond(Graph* graph, CommonOperatorBuilder* common, Node* cond) {
+ Node* tv = graph->NewNode(common->Int32Constant(6));
+ Node* fv = graph->NewNode(common->Int32Constant(7));
+ Node* br = graph->NewNode(common->Branch(), cond, graph->start());
+ Node* t = graph->NewNode(common->IfTrue(), br);
+ Node* f = graph->NewNode(common->IfFalse(), br);
+ Node* m = graph->NewNode(common->Merge(2), t, f);
+ Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), tv, fv, m);
+ return phi;
+}
+
+} // namespace
+
+
+TARGET_TEST_F(SchedulerTest, FloatingDiamond1) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* d1 = CreateDiamond(graph(), common(), p0);
+ Node* ret = graph()->NewNode(common()->Return(), d1, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(13, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, FloatingDiamond2) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* d1 = CreateDiamond(graph(), common(), p0);
+ Node* d2 = CreateDiamond(graph(), common(), p1);
+ Node* add = graph()->NewNode(&kIntAdd, d1, d2);
+ Node* ret = graph()->NewNode(common()->Return(), add, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(24, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, FloatingDiamond3) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* d1 = CreateDiamond(graph(), common(), p0);
+ Node* d2 = CreateDiamond(graph(), common(), p1);
+ Node* add = graph()->NewNode(&kIntAdd, d1, d2);
+ Node* d3 = CreateDiamond(graph(), common(), add);
+ Node* ret = graph()->NewNode(common()->Return(), d3, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(33, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, NestedFloatingDiamonds) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ Node* map = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), p0, p0,
+ p0, start, f);
+ Node* br1 = graph()->NewNode(common()->Branch(), map, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+ Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
+ Node* ttrue = graph()->NewNode(common()->Int32Constant(1));
+ Node* ffalse = graph()->NewNode(common()->Int32Constant(0));
+ Node* phi1 =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), ttrue, ffalse, m1);
+
+
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fv, phi1, m);
+ Node* ephi1 = graph()->NewNode(common()->EffectPhi(2), start, map, m);
+
+ Node* ret = graph()->NewNode(common()->Return(), phi, ephi1, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(23, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithChain) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+
+ Node* brA1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* tA1 = graph()->NewNode(common()->IfTrue(), brA1);
+ Node* fA1 = graph()->NewNode(common()->IfFalse(), brA1);
+ Node* mA1 = graph()->NewNode(common()->Merge(2), tA1, fA1);
+ Node* phiA1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p1, mA1);
+
+ Node* brB1 = graph()->NewNode(common()->Branch(), p1, graph()->start());
+ Node* tB1 = graph()->NewNode(common()->IfTrue(), brB1);
+ Node* fB1 = graph()->NewNode(common()->IfFalse(), brB1);
+ Node* mB1 = graph()->NewNode(common()->Merge(2), tB1, fB1);
+ Node* phiB1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p1, mB1);
+
+ Node* brA2 = graph()->NewNode(common()->Branch(), phiB1, mA1);
+ Node* tA2 = graph()->NewNode(common()->IfTrue(), brA2);
+ Node* fA2 = graph()->NewNode(common()->IfFalse(), brA2);
+ Node* mA2 = graph()->NewNode(common()->Merge(2), tA2, fA2);
+ Node* phiA2 =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), phiB1, c, mA2);
+
+ Node* brB2 = graph()->NewNode(common()->Branch(), phiA1, mB1);
+ Node* tB2 = graph()->NewNode(common()->IfTrue(), brB2);
+ Node* fB2 = graph()->NewNode(common()->IfFalse(), brB2);
+ Node* mB2 = graph()->NewNode(common()->Merge(2), tB2, fB2);
+ Node* phiB2 =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), phiA1, c, mB2);
+
+ Node* add = graph()->NewNode(&kIntAdd, phiA2, phiB2);
+ Node* ret = graph()->NewNode(common()->Return(), add, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(36, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithLoop) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ Node* loop = graph()->NewNode(common()->Loop(2), f, start);
+ Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+ Node* add = graph()->NewNode(&kIntAdd, ind, fv);
+ Node* br1 = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+
+ loop->ReplaceInput(1, t1); // close loop.
+ ind->ReplaceInput(1, ind); // close induction variable.
+
+ Node* m = graph()->NewNode(common()->Merge(2), t, f1);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fv, ind, m);
+
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(20, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond1) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+ Node* loop = graph()->NewNode(common()->Loop(2), start, start);
+ Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* add = graph()->NewNode(&kIntAdd, ind, c);
+
+ Node* br = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+ Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
+ Node* phi1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), add, p0, m1);
+
+ loop->ReplaceInput(1, t); // close loop.
+ ind->ReplaceInput(1, phi1); // close induction variable.
+
+ Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* end = graph()->NewNode(common()->End(), ret, f);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(20, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond2) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+ Node* loop = graph()->NewNode(common()->Loop(2), start, start);
+ Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+ Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+ Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
+ Node* phi1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), c, ind, m1);
+
+ Node* add = graph()->NewNode(&kIntAdd, ind, phi1);
+
+ Node* br = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ loop->ReplaceInput(1, t); // close loop.
+ ind->ReplaceInput(1, add); // close induction variable.
+
+ Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* end = graph()->NewNode(common()->End(), ret, f);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(20, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond3) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+ Node* loop = graph()->NewNode(common()->Loop(2), start, start);
+ Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+ Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+
+ Node* loop1 = graph()->NewNode(common()->Loop(2), t1, start);
+ Node* ind1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+ Node* add1 = graph()->NewNode(&kIntAdd, ind1, c);
+ Node* br2 = graph()->NewNode(common()->Branch(), add1, loop1);
+ Node* t2 = graph()->NewNode(common()->IfTrue(), br2);
+ Node* f2 = graph()->NewNode(common()->IfFalse(), br2);
+
+ loop1->ReplaceInput(1, t2); // close inner loop.
+ ind1->ReplaceInput(1, ind1); // close inner induction variable.
+
+ Node* m1 = graph()->NewNode(common()->Merge(2), f1, f2);
+ Node* phi1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), c, ind1, m1);
+
+ Node* add = graph()->NewNode(&kIntAdd, ind, phi1);
+
+ Node* br = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ loop->ReplaceInput(1, t); // close loop.
+ ind->ReplaceInput(1, add); // close induction variable.
+
+ Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* end = graph()->NewNode(common()->End(), ret, f);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(28, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, PhisPushedDownToDifferentBranches) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+
+ Node* v1 = graph()->NewNode(common()->Int32Constant(1));
+ Node* v2 = graph()->NewNode(common()->Int32Constant(2));
+ Node* v3 = graph()->NewNode(common()->Int32Constant(3));
+ Node* v4 = graph()->NewNode(common()->Int32Constant(4));
+ Node* br = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), v1, v2, m);
+ Node* phi2 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), v3, v4, m);
+
+ Node* br2 = graph()->NewNode(common()->Branch(), p1, graph()->start());
+ Node* t2 = graph()->NewNode(common()->IfTrue(), br2);
+ Node* f2 = graph()->NewNode(common()->IfFalse(), br2);
+ Node* m2 = graph()->NewNode(common()->Merge(2), t2, f2);
+ Node* phi3 =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), phi, phi2, m2);
+
+ Node* ret = graph()->NewNode(common()->Return(), phi3, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(24, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, BranchHintTrue) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* tv = graph()->NewNode(common()->Int32Constant(6));
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(BranchHint::kTrue), p0, start);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), tv, fv, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ Schedule* schedule = ComputeAndVerifySchedule(13, graph());
+ // Make sure the false block is marked as deferred.
+ CHECK(!schedule->block(t)->deferred());
+ CHECK(schedule->block(f)->deferred());
+}
+
+
+TARGET_TEST_F(SchedulerTest, BranchHintFalse) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* tv = graph()->NewNode(common()->Int32Constant(6));
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(BranchHint::kFalse), p0, start);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), tv, fv, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret, start);
+
+ graph()->SetEnd(end);
+
+ Schedule* schedule = ComputeAndVerifySchedule(13, graph());
+ // Make sure the true block is marked as deferred.
+ CHECK(schedule->block(t)->deferred());
+ CHECK(!schedule->block(f)->deferred());
+}
+
+
+TARGET_TEST_F(SchedulerTest, Switch) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* sw = graph()->NewNode(common()->Switch(3), p0, start);
+ Node* c0 = graph()->NewNode(common()->IfValue(0), sw);
+ Node* v0 = graph()->NewNode(common()->Int32Constant(11));
+ Node* c1 = graph()->NewNode(common()->IfValue(1), sw);
+ Node* v1 = graph()->NewNode(common()->Int32Constant(22));
+ Node* d = graph()->NewNode(common()->IfDefault(), sw);
+ Node* vd = graph()->NewNode(common()->Int32Constant(33));
+ Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
+ Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 3), v0, v1, vd, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
+ Node* end = graph()->NewNode(common()->End(), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(16, graph());
+}
+
+
+TARGET_TEST_F(SchedulerTest, FloatingSwitch) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* sw = graph()->NewNode(common()->Switch(3), p0, start);
+ Node* c0 = graph()->NewNode(common()->IfValue(0), sw);
+ Node* v0 = graph()->NewNode(common()->Int32Constant(11));
+ Node* c1 = graph()->NewNode(common()->IfValue(1), sw);
+ Node* v1 = graph()->NewNode(common()->Int32Constant(22));
+ Node* d = graph()->NewNode(common()->IfDefault(), sw);
+ Node* vd = graph()->NewNode(common()->Int32Constant(33));
+ Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
+ Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 3), v0, v1, vd, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(16, graph());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index e5f46c0d53..38924123df 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -4,7 +4,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/conversions.h"
@@ -30,7 +30,7 @@ class SimplifiedOperatorReducerTest : public TypedGraphTest {
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone());
JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(graph(), common(), &javascript, &machine);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine);
SimplifiedOperatorReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
@@ -55,53 +55,42 @@ class SimplifiedOperatorReducerTestWithParam
namespace {
-static const double kFloat64Values[] = {
- -V8_INFINITY, -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
+const double kFloat64Values[] = {
+ -V8_INFINITY, -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
-8.22758e+266, -1.58402e+261, -5.15246e+241, -5.92107e+226, -1.21477e+226,
- -1.67913e+188, -1.6257e+184, -2.60043e+170, -2.52941e+168, -3.06033e+116,
- -4.56201e+52, -3.56788e+50, -9.9066e+38, -3.07261e+31, -2.1271e+09,
- -1.91489e+09, -1.73053e+09, -9.30675e+08, -26030, -20453,
- -15790, -11699, -111, -97, -78,
- -63, -58, -1.53858e-06, -2.98914e-12, -1.14741e-39,
- -8.20347e-57, -1.48932e-59, -3.17692e-66, -8.93103e-81, -3.91337e-83,
- -6.0489e-92, -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
+ -1.67913e+188, -1.6257e+184, -2.60043e+170, -2.52941e+168, -3.06033e+116,
+ -4.56201e+52, -3.56788e+50, -9.9066e+38, -3.07261e+31, -2.1271e+09,
+ -1.91489e+09, -1.73053e+09, -9.30675e+08, -26030, -20453, -15790, -11699,
+ -111, -97, -78, -63, -58, -1.53858e-06, -2.98914e-12, -1.14741e-39,
+ -8.20347e-57, -1.48932e-59, -3.17692e-66, -8.93103e-81, -3.91337e-83,
+ -6.0489e-92, -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
-1.68167e-194, -1.51841e-214, -3.98738e-234, -7.31851e-242, -2.21875e-253,
- -1.11612e-293, -0.0, 0.0, 2.22507e-308, 1.06526e-307,
- 4.16643e-227, 6.76624e-223, 2.0432e-197, 3.16254e-184, 1.37315e-173,
- 2.88603e-172, 1.54155e-99, 4.42923e-81, 1.40539e-73, 5.4462e-73,
- 1.24064e-58, 3.11167e-58, 2.75826e-39, 0.143815, 58,
- 67, 601, 7941, 11644, 13697,
- 25680, 29882, 1.32165e+08, 1.62439e+08, 4.16837e+08,
- 9.59097e+08, 1.32491e+09, 1.8728e+09, 1.0672e+17, 2.69606e+46,
- 1.98285e+79, 1.0098e+82, 7.93064e+88, 3.67444e+121, 9.36506e+123,
- 7.27954e+162, 3.05316e+168, 1.16171e+175, 1.64771e+189, 1.1622e+202,
- 2.00748e+239, 2.51778e+244, 3.90282e+306, 1.79769e+308, V8_INFINITY};
-
-
-static const int32_t kInt32Values[] = {
+ -1.11612e-293, -0.0, 0.0, 2.22507e-308, 1.06526e-307, 4.16643e-227,
+ 6.76624e-223, 2.0432e-197, 3.16254e-184, 1.37315e-173, 2.88603e-172,
+ 1.54155e-99, 4.42923e-81, 1.40539e-73, 5.4462e-73, 1.24064e-58, 3.11167e-58,
+ 2.75826e-39, 0.143815, 58, 67, 601, 7941, 11644, 13697, 25680, 29882,
+ 1.32165e+08, 1.62439e+08, 4.16837e+08, 9.59097e+08, 1.32491e+09, 1.8728e+09,
+ 1.0672e+17, 2.69606e+46, 1.98285e+79, 1.0098e+82, 7.93064e+88, 3.67444e+121,
+ 9.36506e+123, 7.27954e+162, 3.05316e+168, 1.16171e+175, 1.64771e+189,
+ 1.1622e+202, 2.00748e+239, 2.51778e+244, 3.90282e+306, 1.79769e+308,
+ V8_INFINITY};
+
+
+const int32_t kInt32Values[] = {
-2147483647 - 1, -2104508227, -2103151830, -1435284490, -1378926425,
- -1318814539, -1289388009, -1287537572, -1279026536, -1241605942,
- -1226046939, -941837148, -779818051, -413830641, -245798087,
- -184657557, -127145950, -105483328, -32325, -26653,
- -23858, -23834, -22363, -19858, -19044,
- -18744, -15528, -5309, -3372, -2093,
- -104, -98, -97, -93, -84,
- -80, -78, -76, -72, -58,
- -57, -56, -55, -45, -40,
- -34, -32, -25, -24, -5,
- -2, 0, 3, 10, 24,
- 34, 42, 46, 47, 48,
- 52, 56, 64, 65, 71,
- 76, 79, 81, 82, 97,
- 102, 103, 104, 106, 107,
- 109, 116, 122, 3653, 4485,
- 12405, 16504, 26262, 28704, 29755,
- 30554, 16476817, 605431957, 832401070, 873617242,
- 914205764, 1062628108, 1087581664, 1488498068, 1534668023,
- 1661587028, 1696896187, 1866841746, 2032089723, 2147483647};
-
-
-static const uint32_t kUint32Values[] = {
+ -1318814539, -1289388009, -1287537572, -1279026536, -1241605942,
+ -1226046939, -941837148, -779818051, -413830641, -245798087, -184657557,
+ -127145950, -105483328, -32325, -26653, -23858, -23834, -22363, -19858,
+ -19044, -18744, -15528, -5309, -3372, -2093, -104, -98, -97, -93, -84, -80,
+ -78, -76, -72, -58, -57, -56, -55, -45, -40, -34, -32, -25, -24, -5, -2, 0,
+ 3, 10, 24, 34, 42, 46, 47, 48, 52, 56, 64, 65, 71, 76, 79, 81, 82, 97, 102,
+ 103, 104, 106, 107, 109, 116, 122, 3653, 4485, 12405, 16504, 26262, 28704,
+ 29755, 30554, 16476817, 605431957, 832401070, 873617242, 914205764,
+ 1062628108, 1087581664, 1488498068, 1534668023, 1661587028, 1696896187,
+ 1866841746, 2032089723, 2147483647};
+
+
+const uint32_t kUint32Values[] = {
0x0, 0x5, 0x8, 0xc, 0xd, 0x26,
0x28, 0x29, 0x30, 0x34, 0x3e, 0x42,
0x50, 0x5b, 0x63, 0x71, 0x77, 0x7c,
@@ -120,60 +109,15 @@ static const uint32_t kUint32Values[] = {
0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
-} // namespace
-
-
-// -----------------------------------------------------------------------------
-// Unary operators
-
-
-namespace {
-struct UnaryOperator {
- const Operator* (SimplifiedOperatorBuilder::*constructor)();
- const char* constructor_name;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
- return os << unop.constructor_name;
-}
-
-
-static const UnaryOperator kUnaryOperators[] = {
- {&SimplifiedOperatorBuilder::AnyToBoolean, "AnyToBoolean"},
- {&SimplifiedOperatorBuilder::BooleanNot, "BooleanNot"},
- {&SimplifiedOperatorBuilder::ChangeBitToBool, "ChangeBitToBool"},
- {&SimplifiedOperatorBuilder::ChangeBoolToBit, "ChangeBoolToBit"},
- {&SimplifiedOperatorBuilder::ChangeFloat64ToTagged,
- "ChangeFloat64ToTagged"},
- {&SimplifiedOperatorBuilder::ChangeInt32ToTagged, "ChangeInt32ToTagged"},
- {&SimplifiedOperatorBuilder::ChangeTaggedToFloat64,
- "ChangeTaggedToFloat64"},
- {&SimplifiedOperatorBuilder::ChangeTaggedToInt32, "ChangeTaggedToInt32"},
- {&SimplifiedOperatorBuilder::ChangeTaggedToUint32, "ChangeTaggedToUint32"},
- {&SimplifiedOperatorBuilder::ChangeUint32ToTagged, "ChangeUint32ToTagged"}};
+const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::quiet_NaN(),
+ bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
+ bit_cast<double>(V8_UINT64_C(0xFFFFFFFFFFFFFFFF))};
} // namespace
-typedef SimplifiedOperatorReducerTestWithParam<UnaryOperator>
- SimplifiedUnaryOperatorTest;
-
-
-TEST_P(SimplifiedUnaryOperatorTest, Parameter) {
- const UnaryOperator& unop = GetParam();
- Reduction reduction = Reduce(graph()->NewNode(
- (simplified()->*unop.constructor)(), Parameter(Type::Any())));
- EXPECT_FALSE(reduction.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(SimplifiedOperatorReducerTest,
- SimplifiedUnaryOperatorTest,
- ::testing::ValuesIn(kUnaryOperators));
-
-
// -----------------------------------------------------------------------------
// AnyToBoolean
@@ -372,23 +316,13 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithConstant) {
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant1) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
- NumberConstant(-base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsFloat64Constant(BitEq(-base::OS::nan_value())));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant2) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
- NumberConstant(base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsFloat64Constant(BitEq(base::OS::nan_value())));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant) {
+ TRACED_FOREACH(double, nan, kNaNs) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(), NumberConstant(nan)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(BitEq(nan)));
+ }
}
@@ -428,21 +362,13 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant1) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
- NumberConstant(-base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant2) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
- NumberConstant(base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant) {
+ TRACED_FOREACH(double, nan, kNaNs) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(), NumberConstant(nan)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ }
}
@@ -483,21 +409,13 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant1) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
- NumberConstant(-base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant2) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
- NumberConstant(base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant) {
+ TRACED_FOREACH(double, nan, kNaNs) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(), NumberConstant(nan)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ }
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index bc537fd952..680793023f 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -51,6 +51,7 @@ const PureOperator kPureOperators[] = {
PURE(NumberModulus, Operator::kNoProperties, 2),
PURE(NumberToInt32, Operator::kNoProperties, 1),
PURE(NumberToUint32, Operator::kNoProperties, 1),
+ PURE(PlainPrimitiveToNumber, Operator::kNoProperties, 1),
PURE(StringEqual, Operator::kCommutative, 2),
PURE(StringLessThan, Operator::kNoProperties, 2),
PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
diff --git a/deps/v8/test/cctest/compiler/test-typer.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 5f7f55afaa..86a6de3f38 100644
--- a/deps/v8/test/cctest/compiler/test-typer.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,28 +6,23 @@
#include "src/codegen.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/typer.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
+#include "src/compiler/node-properties.h"
#include "test/cctest/types-fuzz.h"
+#include "test/unittests/compiler/graph-unittest.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
// TODO(titzer): generate a large set of deterministic inputs for these tests.
-class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
+class TyperTest : public TypedGraphTest {
public:
- TyperTester()
- : GraphAndBuilders(main_zone()),
- types_(main_zone(), isolate()),
- typer_(graph(), MaybeHandle<Context>()),
- javascript_(main_zone()) {
- Node* s = graph()->NewNode(common()->Start(3));
- graph()->SetStart(s);
+ TyperTest()
+ : TypedGraphTest(3),
+ types_(zone(), isolate(), random_number_generator()),
+ javascript_(zone()) {
context_node_ = graph()->NewNode(common()->Parameter(2), graph()->start());
- rng_ = isolate()->random_number_generator();
+ rng_ = random_number_generator();
integers.push_back(0);
integers.push_back(0);
@@ -54,28 +49,19 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
}
Types<Type, Type*, Zone> types_;
- Typer typer_;
JSOperatorBuilder javascript_;
Node* context_node_;
v8::base::RandomNumberGenerator* rng_;
std::vector<double> integers;
std::vector<double> int32s;
- Isolate* isolate() { return main_isolate(); }
- Graph* graph() { return main_graph_; }
- CommonOperatorBuilder* common() { return &main_common_; }
-
- Node* Parameter(int index = 0) {
- return graph()->NewNode(common()->Parameter(index), graph()->start());
- }
-
Type* TypeBinaryOp(const Operator* op, Type* lhs, Type* rhs) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
NodeProperties::SetBounds(p0, Bounds(lhs));
NodeProperties::SetBounds(p1, Bounds(rhs));
- Node* n = graph()->NewNode(
- op, p0, p1, context_node_, graph()->start(), graph()->start());
+ Node* n = graph()->NewNode(op, p0, p1, context_node_, graph()->start(),
+ graph()->start());
return NodeProperties::GetBounds(n).upper;
}
@@ -87,18 +73,18 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
}
Type* NewRange(double i, double j) {
- Factory* f = isolate()->factory();
- i::Handle<i::Object> min = f->NewNumber(i);
- i::Handle<i::Object> max = f->NewNumber(j);
- if (min->Number() > max->Number()) std::swap(min, max);
- return Type::Range(min, max, main_zone());
+ if (i > j) std::swap(i, j);
+ return Type::Range(i, j, zone());
}
double RandomInt(double min, double max) {
switch (rng_->NextInt(4)) {
- case 0: return min;
- case 1: return max;
- default: break;
+ case 0:
+ return min;
+ case 1:
+ return max;
+ default:
+ break;
}
if (min == +V8_INFINITY) return +V8_INFINITY;
if (max == -V8_INFINITY) return -V8_INFINITY;
@@ -113,7 +99,7 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
}
double RandomInt(Type::RangeType* range) {
- return RandomInt(range->Min()->Number(), range->Max()->Number());
+ return RandomInt(range->Min(), range->Max());
}
// Careful, this function runs O(max_width^5) trials.
@@ -133,8 +119,8 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
for (int x2 = rmin; x2 < rmin + width; x2++) {
double result_value = opfun(x1, x2);
Type* result_type = Type::Constant(
- isolate()->factory()->NewNumber(result_value), main_zone());
- CHECK(result_type->Is(expected_type));
+ isolate()->factory()->NewNumber(result_value), zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
}
}
}
@@ -154,8 +140,8 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
double x2 = RandomInt(r2);
double result_value = opfun(x1, x2);
Type* result_type = Type::Constant(
- isolate()->factory()->NewNumber(result_value), main_zone());
- CHECK(result_type->Is(expected_type));
+ isolate()->factory()->NewNumber(result_value), zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
}
}
}
@@ -173,8 +159,8 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
Type* result_type =
Type::Constant(result_value ? isolate()->factory()->true_value()
: isolate()->factory()->false_value(),
- main_zone());
- CHECK(result_type->Is(expected_type));
+ zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
}
}
}
@@ -190,8 +176,8 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
int32_t x2 = static_cast<int32_t>(RandomInt(r2));
double result_value = opfun(x1, x2);
Type* result_type = Type::Constant(
- isolate()->factory()->NewNumber(result_value), main_zone());
- CHECK(result_type->Is(expected_type));
+ isolate()->factory()->NewNumber(result_value), zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
}
}
}
@@ -209,20 +195,26 @@ class TyperTester : public HandleAndZoneScope, public GraphAndBuilders {
Type* type1 = types_.Fuzz();
Type* type2 = types_.Fuzz();
Type* type = TypeBinaryOp(op, type1, type2);
- Type* subtype1 = RandomSubtype(type1);;
- Type* subtype2 = RandomSubtype(type2);;
+ Type* subtype1 = RandomSubtype(type1);
+ ;
+ Type* subtype2 = RandomSubtype(type2);
+ ;
Type* subtype = TypeBinaryOp(op, subtype1, subtype2);
- CHECK(subtype->Is(type));
+ EXPECT_TRUE(subtype->Is(type));
}
}
};
-static int32_t shift_left(int32_t x, int32_t y) { return x << y; }
-static int32_t shift_right(int32_t x, int32_t y) { return x >> y; }
-static int32_t bit_or(int32_t x, int32_t y) { return x | y; }
-static int32_t bit_and(int32_t x, int32_t y) { return x & y; }
-static int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
+namespace {
+
+int32_t shift_left(int32_t x, int32_t y) { return x << y; }
+int32_t shift_right(int32_t x, int32_t y) { return x >> y; }
+int32_t bit_or(int32_t x, int32_t y) { return x | y; }
+int32_t bit_and(int32_t x, int32_t y) { return x & y; }
+int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
+
+} // namespace
//------------------------------------------------------------------------------
@@ -232,115 +224,96 @@ static int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
// to ranges as input types.
-TEST(TypeJSAdd) {
- TyperTester t;
- t.TestBinaryArithOp(t.javascript_.Add(), std::plus<double>());
+TEST_F(TyperTest, TypeJSAdd) {
+ TestBinaryArithOp(javascript_.Add(), std::plus<double>());
}
-TEST(TypeJSSubtract) {
- TyperTester t;
- t.TestBinaryArithOp(t.javascript_.Subtract(), std::minus<double>());
+TEST_F(TyperTest, TypeJSSubtract) {
+ TestBinaryArithOp(javascript_.Subtract(), std::minus<double>());
}
-TEST(TypeJSMultiply) {
- TyperTester t;
- t.TestBinaryArithOp(t.javascript_.Multiply(), std::multiplies<double>());
+TEST_F(TyperTest, TypeJSMultiply) {
+ TestBinaryArithOp(javascript_.Multiply(), std::multiplies<double>());
}
-TEST(TypeJSDivide) {
- TyperTester t;
- t.TestBinaryArithOp(t.javascript_.Divide(), std::divides<double>());
+TEST_F(TyperTest, TypeJSDivide) {
+ TestBinaryArithOp(javascript_.Divide(), std::divides<double>());
}
-TEST(TypeJSModulus) {
- TyperTester t;
- t.TestBinaryArithOp(t.javascript_.Modulus(), modulo);
+TEST_F(TyperTest, TypeJSModulus) {
+ TestBinaryArithOp(javascript_.Modulus(), modulo);
}
-TEST(TypeJSBitwiseOr) {
- TyperTester t;
- t.TestBinaryBitOp(t.javascript_.BitwiseOr(), bit_or);
+TEST_F(TyperTest, TypeJSBitwiseOr) {
+ TestBinaryBitOp(javascript_.BitwiseOr(), bit_or);
}
-TEST(TypeJSBitwiseAnd) {
- TyperTester t;
- t.TestBinaryBitOp(t.javascript_.BitwiseAnd(), bit_and);
+TEST_F(TyperTest, TypeJSBitwiseAnd) {
+ TestBinaryBitOp(javascript_.BitwiseAnd(), bit_and);
}
-TEST(TypeJSBitwiseXor) {
- TyperTester t;
- t.TestBinaryBitOp(t.javascript_.BitwiseXor(), bit_xor);
+TEST_F(TyperTest, TypeJSBitwiseXor) {
+ TestBinaryBitOp(javascript_.BitwiseXor(), bit_xor);
}
-TEST(TypeJSShiftLeft) {
- TyperTester t;
- t.TestBinaryBitOp(t.javascript_.ShiftLeft(), shift_left);
+TEST_F(TyperTest, TypeJSShiftLeft) {
+ TestBinaryBitOp(javascript_.ShiftLeft(), shift_left);
}
-TEST(TypeJSShiftRight) {
- TyperTester t;
- t.TestBinaryBitOp(t.javascript_.ShiftRight(), shift_right);
+TEST_F(TyperTest, TypeJSShiftRight) {
+ TestBinaryBitOp(javascript_.ShiftRight(), shift_right);
}
-TEST(TypeJSLessThan) {
- TyperTester t;
- t.TestBinaryCompareOp(t.javascript_.LessThan(), std::less<double>());
+TEST_F(TyperTest, TypeJSLessThan) {
+ TestBinaryCompareOp(javascript_.LessThan(), std::less<double>());
}
-TEST(TypeJSLessThanOrEqual) {
- TyperTester t;
- t.TestBinaryCompareOp(
- t.javascript_.LessThanOrEqual(), std::less_equal<double>());
+TEST_F(TyperTest, TypeJSLessThanOrEqual) {
+ TestBinaryCompareOp(javascript_.LessThanOrEqual(), std::less_equal<double>());
}
-TEST(TypeJSGreaterThan) {
- TyperTester t;
- t.TestBinaryCompareOp(t.javascript_.GreaterThan(), std::greater<double>());
+TEST_F(TyperTest, TypeJSGreaterThan) {
+ TestBinaryCompareOp(javascript_.GreaterThan(), std::greater<double>());
}
-TEST(TypeJSGreaterThanOrEqual) {
- TyperTester t;
- t.TestBinaryCompareOp(
- t.javascript_.GreaterThanOrEqual(), std::greater_equal<double>());
+TEST_F(TyperTest, TypeJSGreaterThanOrEqual) {
+ TestBinaryCompareOp(javascript_.GreaterThanOrEqual(),
+ std::greater_equal<double>());
}
-TEST(TypeJSEqual) {
- TyperTester t;
- t.TestBinaryCompareOp(t.javascript_.Equal(), std::equal_to<double>());
+TEST_F(TyperTest, TypeJSEqual) {
+ TestBinaryCompareOp(javascript_.Equal(), std::equal_to<double>());
}
-TEST(TypeJSNotEqual) {
- TyperTester t;
- t.TestBinaryCompareOp(t.javascript_.NotEqual(), std::not_equal_to<double>());
+TEST_F(TyperTest, TypeJSNotEqual) {
+ TestBinaryCompareOp(javascript_.NotEqual(), std::not_equal_to<double>());
}
// For numbers there's no difference between strict and non-strict equality.
-TEST(TypeJSStrictEqual) {
- TyperTester t;
- t.TestBinaryCompareOp(t.javascript_.StrictEqual(), std::equal_to<double>());
+TEST_F(TyperTest, TypeJSStrictEqual) {
+ TestBinaryCompareOp(javascript_.StrictEqual(), std::equal_to<double>());
}
-TEST(TypeJSStrictNotEqual) {
- TyperTester t;
- t.TestBinaryCompareOp(
- t.javascript_.StrictNotEqual(), std::not_equal_to<double>());
+TEST_F(TyperTest, TypeJSStrictNotEqual) {
+ TestBinaryCompareOp(javascript_.StrictNotEqual(),
+ std::not_equal_to<double>());
}
@@ -350,31 +323,44 @@ TEST(TypeJSStrictNotEqual) {
// List should be in sync with JS_SIMPLE_BINOP_LIST.
#define JSBINOP_LIST(V) \
- V(Equal) \
- V(NotEqual) \
- V(StrictEqual) \
- V(StrictNotEqual) \
- V(LessThan) \
- V(GreaterThan) \
- V(LessThanOrEqual) \
+ V(Equal) \
+ V(NotEqual) \
+ V(StrictEqual) \
+ V(StrictNotEqual) \
+ V(LessThan) \
+ V(GreaterThan) \
+ V(LessThanOrEqual) \
V(GreaterThanOrEqual) \
- V(BitwiseOr) \
- V(BitwiseXor) \
- V(BitwiseAnd) \
- V(ShiftLeft) \
- V(ShiftRight) \
- V(ShiftRightLogical) \
- V(Add) \
- V(Subtract) \
- V(Multiply) \
- V(Divide) \
+ V(BitwiseOr) \
+ V(BitwiseXor) \
+ V(BitwiseAnd) \
+ V(ShiftLeft) \
+ V(ShiftRight) \
+ V(ShiftRightLogical) \
+ V(Add) \
+ V(Subtract) \
+ V(Multiply) \
+ V(Divide) \
V(Modulus)
-#define TEST_FUNC(name) \
- TEST(Monotonicity_##name) { \
- TyperTester t; \
- t.TestBinaryMonotonicity(t.javascript_.name()); \
+#define TEST_FUNC(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name()); \
}
JSBINOP_LIST(TEST_FUNC)
#undef TEST_FUNC
+
+
+//------------------------------------------------------------------------------
+// Regression tests
+
+
+TEST_F(TyperTest, TypeRegressInt32Constant) {
+ int values[] = {-5, 10};
+ for (auto i : values) {
+ Node* c = graph()->NewNode(common()->Int32Constant(i));
+ Type* type = NodeProperties::GetBounds(c).upper;
+ EXPECT_TRUE(type->Is(NewRange(i, i)));
+ }
+}
diff --git a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
index b6be0bff17..5ea375ff19 100644
--- a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -5,6 +5,8 @@
#include <limits>
#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
#include "src/compiler/value-numbering-reducer.h"
#include "test/unittests/test-utils.h"
@@ -20,8 +22,8 @@ struct TestOperator : public Operator {
};
-static const TestOperator kOp0(0, Operator::kEliminatable, 0, 1);
-static const TestOperator kOp1(1, Operator::kEliminatable, 1, 1);
+static const TestOperator kOp0(0, Operator::kIdempotent, 0, 1);
+static const TestOperator kOp1(1, Operator::kIdempotent, 1, 1);
class ValueNumberingReducerTest : public TestWithZone {
@@ -74,17 +76,17 @@ TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
Operator::Opcode opcode = static_cast<Operator::Opcode>(
std::numeric_limits<Operator::Opcode>::max() - i);
inputs[i] = graph()->NewNode(
- new (zone()) TestOperator(opcode, Operator::kEliminatable, 0, 1));
+ new (zone()) TestOperator(opcode, Operator::kIdempotent, 0, 1));
}
TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
const TestOperator op1(static_cast<Operator::Opcode>(input_count),
- Operator::kEliminatable, input_count, 1);
+ Operator::kIdempotent, input_count, 1);
Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
Reduction r1 = Reduce(n1);
EXPECT_FALSE(r1.Changed());
const TestOperator op2(static_cast<Operator::Opcode>(input_count),
- Operator::kEliminatable, input_count, 1);
+ Operator::kIdempotent, input_count, 1);
Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
Reduction r2 = Reduce(n2);
EXPECT_TRUE(r2.Changed());
@@ -100,10 +102,10 @@ TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
Operator::Opcode opcode = static_cast<Operator::Opcode>(
std::numeric_limits<Operator::Opcode>::max() - i);
inputs[i] = graph()->NewNode(
- new (zone()) TestOperator(opcode, Operator::kEliminatable, 0, 1));
+ new (zone()) TestOperator(opcode, Operator::kIdempotent, 0, 1));
}
TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
- const TestOperator op1(1, Operator::kEliminatable, input_count, 1);
+ const TestOperator op1(1, Operator::kIdempotent, input_count, 1);
Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
Reduction r = Reduce(n);
EXPECT_FALSE(r.Changed());
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 9ef0fa58ca..e524764f82 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -63,15 +63,6 @@ TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
}
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
- m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
-}
-
-
// -----------------------------------------------------------------------------
// Loads and stores
@@ -206,37 +197,49 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
// TruncateInt64ToInt32.
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
StreamBuilder m(this, kMachInt32, kMachInt64);
- Node* const p = m.Parameter(0);
- Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
- m.Return(t);
+ m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+ ASSERT_EQ(0U, s.size());
}
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
- Node* const p = m.Parameter(0);
- Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
- m.Return(t);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+ TRACED_FORRANGE(int32_t, k, 1, 32) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ Node* const p = m.Parameter(0);
+ Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(k)));
+ m.Return(t);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+ EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shl) {
+ TRACED_FORRANGE(int32_t, k, 1, 31) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ Node* const p = m.Parameter(0);
+ Node* const t = m.TruncateInt64ToInt32(m.Word64Shl(p, m.Int64Constant(k)));
+ m.Return(t);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shl32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+ EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+ }
}
@@ -991,7 +994,43 @@ TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
// -----------------------------------------------------------------------------
-// Word64Shl.
+// Floating point operations.
+
+
+TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Float64Mul(add, m.Parameter(1));
+ Node* sub = m.Float64Sub(mul, add);
+ Node* ret = m.Float64Div(mul, sub);
+ m.Return(ret);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(4U, s.size());
+ EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
+ EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Float64Mul(add, m.Parameter(1));
+ Node* sub = m.Float64Sub(mul, add);
+ Node* ret = m.Float64Div(mul, sub);
+ m.Return(ret);
+ Stream s = m.Build();
+ ASSERT_EQ(4U, s.size());
+ EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
@@ -1032,34 +1071,62 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
}
-TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
- Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
- Node* mul = m.Float64Mul(add, m.Parameter(1));
- Node* sub = m.Float64Sub(mul, add);
- Node* ret = m.Float64Div(mul, sub);
- m.Return(ret);
- Stream s = m.Build(AVX);
- ASSERT_EQ(4U, s.size());
- EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(p0, m.Int32Constant(0xff));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
- Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
- Node* mul = m.Float64Mul(add, m.Parameter(1));
- Node* sub = m.Float64Sub(mul, add);
- Node* ret = m.Float64Div(mul, sub);
- m.Return(ret);
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(m.Int32Constant(0xff), p0);
+ m.Return(n);
Stream s = m.Build();
- ASSERT_EQ(4U, s.size());
- EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(p0, m.Int32Constant(0xffff));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(m.Int32Constant(0xffff), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
}
diff --git a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
index e23557adc4..3bfde4bdce 100644
--- a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
+++ b/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
@@ -12,7 +12,7 @@ namespace compiler {
class ZonePoolTest : public TestWithIsolate {
public:
- ZonePoolTest() : zone_pool_(isolate()) {}
+ ZonePoolTest() {}
protected:
ZonePool* zone_pool() { return &zone_pool_; }
@@ -32,9 +32,9 @@ class ZonePoolTest : public TestWithIsolate {
size_t Allocate(Zone* zone) {
size_t bytes = rng.NextInt(25) + 7;
- int size_before = zone->allocation_size();
- zone->New(static_cast<int>(bytes));
- return static_cast<size_t>(zone->allocation_size() - size_before);
+ size_t size_before = zone->allocation_size();
+ zone->New(bytes);
+ return zone->allocation_size() - size_before;
}
private:
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index 8c361ddc36..d2cc7dd77e 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -7,6 +7,10 @@
#include "src/base/compiler-specific.h"
#include "testing/gmock/include/gmock/gmock.h"
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+#include "src/startup-data-util.h"
+#endif // V8_USE_EXTERNAL_STARTUP_DATA
+
namespace {
class DefaultPlatformEnvironment FINAL : public ::testing::Environment {
@@ -41,5 +45,8 @@ int main(int argc, char** argv) {
testing::InitGoogleMock(&argc, argv);
testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+ v8::StartupDataHandler startup_data(argv[0], NULL, NULL);
+#endif
return RUN_ALL_TESTS();
}
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 31d724ab68..5d1c1f14f6 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -94,6 +94,7 @@ namespace internal {
TestWithIsolate::~TestWithIsolate() {}
+TestWithIsolateAndZone::~TestWithIsolateAndZone() {}
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 511e3574c8..42831a7db0 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -16,7 +16,7 @@ namespace v8 {
std::ostream& operator<<(std::ostream&, ExternalArrayType);
-class TestWithIsolate : public ::testing::Test {
+class TestWithIsolate : public virtual ::testing::Test {
public:
TestWithIsolate();
virtual ~TestWithIsolate();
@@ -90,9 +90,9 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
};
-class TestWithZone : public TestWithIsolate {
+class TestWithZone : public virtual ::testing::Test {
public:
- TestWithZone() : zone_(isolate()) {}
+ TestWithZone() {}
virtual ~TestWithZone();
Zone* zone() { return &zone_; }
@@ -103,6 +103,21 @@ class TestWithZone : public TestWithIsolate {
DISALLOW_COPY_AND_ASSIGN(TestWithZone);
};
+
+class TestWithIsolateAndZone : public virtual TestWithIsolate {
+ public:
+ TestWithIsolateAndZone() {}
+ virtual ~TestWithIsolateAndZone();
+
+ Zone* zone() { return &zone_; }
+
+ private:
+ Zone zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithIsolateAndZone);
+};
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 2ead44fce1..a12d5e7f4b 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -28,6 +28,7 @@
'base/division-by-constant-unittest.cc',
'base/flags-unittest.cc',
'base/functional-unittest.cc',
+ 'base/logging-unittest.cc',
'base/iterator-unittest.cc',
'base/platform/condition-variable-unittest.cc',
'base/platform/mutex-unittest.cc',
@@ -42,6 +43,8 @@
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',
'compiler/control-equivalence-unittest.cc',
+ 'compiler/control-flow-optimizer-unittest.cc',
+ 'compiler/control-reducer-unittest.cc',
'compiler/diamond-unittest.cc',
'compiler/graph-reducer-unittest.cc',
'compiler/graph-unittest.cc',
@@ -51,19 +54,27 @@
'compiler/instruction-sequence-unittest.cc',
'compiler/instruction-sequence-unittest.h',
'compiler/js-builtin-reducer-unittest.cc',
+ 'compiler/js-intrinsic-lowering-unittest.cc',
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
'compiler/load-elimination-unittest.cc',
+ 'compiler/loop-peeling-unittest.cc',
'compiler/machine-operator-reducer-unittest.cc',
'compiler/machine-operator-unittest.cc',
'compiler/move-optimizer-unittest.cc',
'compiler/node-matchers-unittest.cc',
+ 'compiler/node-properties-unittest.cc',
'compiler/node-test-utils.cc',
'compiler/node-test-utils.h',
+ 'compiler/node-unittest.cc',
+ 'compiler/opcodes-unittest.cc',
'compiler/register-allocator-unittest.cc',
+ 'compiler/schedule-unittest.cc',
'compiler/select-lowering-unittest.cc',
+ 'compiler/scheduler-unittest.cc',
'compiler/simplified-operator-reducer-unittest.cc',
'compiler/simplified-operator-unittest.cc',
+ 'compiler/typer-unittest.cc',
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-pool-unittest.cc',
'libplatform/default-platform-unittest.cc',
@@ -73,6 +84,8 @@
'run-all-unittests.cc',
'test-utils.h',
'test-utils.cc',
+ '../../src/startup-data-util.h',
+ '../../src/startup-data-util.cc'
],
'conditions': [
['v8_target_arch=="arm"', {
@@ -105,6 +118,11 @@
'compiler/x64/instruction-selector-x64-unittest.cc',
],
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ 'compiler/ppc/instruction-selector-ppc-unittest.cc',
+ ],
+ }],
['component=="shared_library"', {
# compiler-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
diff --git a/deps/v8/test/webkit/exception-for-nonobject-expected.txt b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
index e6003986b9..b6cb95b946 100644
--- a/deps/v8/test/webkit/exception-for-nonobject-expected.txt
+++ b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
@@ -26,7 +26,7 @@ Test for correct handling of exceptions from instanceof and 'new' expressions
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS new {}.undefined threw exception TypeError: undefined is not a function.
+PASS new {}.undefined threw exception TypeError: (intermediate value).undefined is not a function.
PASS 1 instanceof {}.undefined threw exception TypeError: Expecting a function in instanceof check, but got undefined.
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
index 3e36c70615..9d1c1748de 100644
--- a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
+++ b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
@@ -28,13 +28,12 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS [1].toString() is '1'
PASS [1].toLocaleString() is 'toLocaleString'
-FAIL [1].toLocaleString() should be 1. Threw exception TypeError: string is not a function
+FAIL [1].toLocaleString() should be 1. Threw exception TypeError: [1].toLocaleString is not a function
PASS [/r/].toString() is 'toString2'
PASS [/r/].toLocaleString() is 'toLocaleString2'
-FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: string is not a function
+FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: [/r/].toLocaleString is not a function
PASS caught is true
PASS successfullyParsed is true
-
TEST COMPLETE
diff --git a/deps/v8/test/webkit/object-literal-syntax-expected.txt b/deps/v8/test/webkit/object-literal-syntax-expected.txt
index f9764454c5..8e1242109d 100644
--- a/deps/v8/test/webkit/object-literal-syntax-expected.txt
+++ b/deps/v8/test/webkit/object-literal-syntax-expected.txt
@@ -26,20 +26,20 @@ Make sure that we correctly identify parse errors in object literals
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS ({a:1, get a(){}}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS ({a:1, set a(v){}}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS ({get a(){}, a:1}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS ({set a(v){}, a:1}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS ({get a(){}, get a(){}}) threw exception SyntaxError: Object literal may not have multiple get/set accessors with the same name.
-PASS ({set a(v){}, set a(v){}}) threw exception SyntaxError: Object literal may not have multiple get/set accessors with the same name.
-PASS ({set a(v){}, get a(){}, set a(v){}}) threw exception SyntaxError: Object literal may not have multiple get/set accessors with the same name.
-PASS (function(){({a:1, get a(){}})}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS (function(){({a:1, set a(v){}})}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS (function(){({get a(){}, a:1})}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS (function(){({set a(v){}, a:1})}) threw exception SyntaxError: Object literal may not have data and accessor property with the same name.
-PASS (function(){({get a(){}, get a(){}})}) threw exception SyntaxError: Object literal may not have multiple get/set accessors with the same name.
-PASS (function(){({set a(v){}, set a(v){}})}) threw exception SyntaxError: Object literal may not have multiple get/set accessors with the same name.
-PASS (function(){({set a(v){}, get a(){}, set a(v){}})}) threw exception SyntaxError: Object literal may not have multiple get/set accessors with the same name.
+PASS ({a:1, get a(){}}), true is true
+PASS ({a:1, set a(v){}}), true is true
+PASS ({get a(){}, a:1}), true is true
+PASS ({set a(v){}, a:1}), true is true
+PASS ({get a(){}, get a(){}}), true is true
+PASS ({set a(v){}, set a(v){}}), true is true
+PASS ({set a(v){}, get a(){}, set a(v){}}), true is true
+PASS (function(){({a:1, get a(){}})}), true is true
+PASS (function(){({a:1, set a(v){}})}), true is true
+PASS (function(){({get a(){}, a:1})}), true is true
+PASS (function(){({set a(v){}, a:1})}), true is true
+PASS (function(){({get a(){}, get a(){}})}), true is true
+PASS (function(){({set a(v){}, set a(v){}})}), true is true
+PASS (function(){({set a(v){}, get a(){}, set a(v){}})}), true is true
PASS ({a:1, a:1, a:1}), true is true
PASS ({get a(){}, set a(v){}}), true is true
PASS ({set a(v){}, get a(){}}), true is true
@@ -48,5 +48,6 @@ PASS (function(){({get a(){}, set a(v){}})}), true is true
PASS (function(){({set a(v){}, get a(){}})}), true is true
PASS successfullyParsed is true
+
TEST COMPLETE
diff --git a/deps/v8/test/webkit/object-literal-syntax.js b/deps/v8/test/webkit/object-literal-syntax.js
index e9cc2dd8c5..40f842b403 100644
--- a/deps/v8/test/webkit/object-literal-syntax.js
+++ b/deps/v8/test/webkit/object-literal-syntax.js
@@ -23,20 +23,20 @@
description("Make sure that we correctly identify parse errors in object literals");
-shouldThrow("({a:1, get a(){}})");
-shouldThrow("({a:1, set a(v){}})");
-shouldThrow("({get a(){}, a:1})");
-shouldThrow("({set a(v){}, a:1})");
-shouldThrow("({get a(){}, get a(){}})");
-shouldThrow("({set a(v){}, set a(v){}})");
-shouldThrow("({set a(v){}, get a(){}, set a(v){}})");
-shouldThrow("(function(){({a:1, get a(){}})})");
-shouldThrow("(function(){({a:1, set a(v){}})})");
-shouldThrow("(function(){({get a(){}, a:1})})");
-shouldThrow("(function(){({set a(v){}, a:1})})");
-shouldThrow("(function(){({get a(){}, get a(){}})})");
-shouldThrow("(function(){({set a(v){}, set a(v){}})})");
-shouldThrow("(function(){({set a(v){}, get a(){}, set a(v){}})})");
+shouldBeTrue("({a:1, get a(){}}), true");
+shouldBeTrue("({a:1, set a(v){}}), true");
+shouldBeTrue("({get a(){}, a:1}), true");
+shouldBeTrue("({set a(v){}, a:1}), true");
+shouldBeTrue("({get a(){}, get a(){}}), true");
+shouldBeTrue("({set a(v){}, set a(v){}}), true");
+shouldBeTrue("({set a(v){}, get a(){}, set a(v){}}), true");
+shouldBeTrue("(function(){({a:1, get a(){}})}), true");
+shouldBeTrue("(function(){({a:1, set a(v){}})}), true");
+shouldBeTrue("(function(){({get a(){}, a:1})}), true");
+shouldBeTrue("(function(){({set a(v){}, a:1})}), true");
+shouldBeTrue("(function(){({get a(){}, get a(){}})}), true");
+shouldBeTrue("(function(){({set a(v){}, set a(v){}})}), true");
+shouldBeTrue("(function(){({set a(v){}, get a(){}, set a(v){}})}), true");
shouldBeTrue("({a:1, a:1, a:1}), true");
shouldBeTrue("({get a(){}, set a(v){}}), true");
shouldBeTrue("({set a(v){}, get a(){}}), true");
diff --git a/deps/v8/test/webkit/run-json-stringify-expected.txt b/deps/v8/test/webkit/run-json-stringify-expected.txt
index 45d55c7caf..fef38156a7 100644
--- a/deps/v8/test/webkit/run-json-stringify-expected.txt
+++ b/deps/v8/test/webkit/run-json-stringify-expected.txt
@@ -83,7 +83,7 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON});
}
-PASS tests[i](nativeJSON) threw exception TypeError: undefined is not a function.
+PASS tests[i](nativeJSON) threw exception TypeError: jsonObject.stringify is not a function.
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return "custom toISOString"; }});
}
@@ -101,7 +101,7 @@ function (jsonObject){
d.toISOString = null;
return jsonObject.stringify(d);
}
-PASS tests[i](nativeJSON) threw exception TypeError: object is not a function.
+PASS tests[i](nativeJSON) threw exception TypeError: jsonObject.stringify is not a function.
function (jsonObject){
var d = new Date(0);
d.toJSON = undefined;
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index c33f1b928e..fb8d77d8d2 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -44,9 +44,8 @@
'dfg-double-vote-fuzz': [SKIP],
'reentrant-caching': [SKIP],
'sort-large-array': [SKIP],
- # Too slow on windows with --nocrankshaft.
- # TODO(mstarzinger): Too slow with TF.
- 'array-iterate-backwards': [PASS, NO_VARIANTS],
+ # Too slow with --enable-slow-asserts.
+ 'array-iterate-backwards': [SKIP],
}], # 'mode == debug'
['simulator', {
# Skip tests that timeout with turbofan.
@@ -71,7 +70,6 @@
['gc_stress == True and mode == debug', {
# Skip tests that timeout.
- 'array-iterate-backwards': [SKIP]
}], # 'gc_stress == True and mode == debug'
##############################################################################
diff --git a/deps/v8/testing/gmock-support.h b/deps/v8/testing/gmock-support.h
index 012775b5cb..d00468e083 100644
--- a/deps/v8/testing/gmock-support.h
+++ b/deps/v8/testing/gmock-support.h
@@ -7,6 +7,7 @@
#include <cmath>
#include <cstring>
+#include <string>
#include "testing/gmock/include/gmock/gmock.h"
@@ -34,25 +35,6 @@ class Capture {
namespace internal {
-struct AnyBitEq {
- template <typename A, typename B>
- bool operator()(A const& a, B const& b) const {
- if (sizeof(A) != sizeof(B)) return false;
- return std::memcmp(&a, &b, sizeof(A)) == 0;
- }
-};
-
-
-template <typename Rhs>
-class BitEqMatcher : public ComparisonBase<BitEqMatcher<Rhs>, Rhs, AnyBitEq> {
- public:
- explicit BitEqMatcher(Rhs const& rhs)
- : ComparisonBase<BitEqMatcher<Rhs>, Rhs, AnyBitEq>(rhs) {}
- static const char* Desc() { return "is bitwise equal to"; }
- static const char* NegatedDesc() { return "isn't bitwise equal to"; }
-};
-
-
template <typename T>
class CaptureEqMatcher : public MatcherInterface<T> {
public:
@@ -83,10 +65,11 @@ class CaptureEqMatcher : public MatcherInterface<T> {
// Creates a polymorphic matcher that matches anything whose bit representation
-// is equal to that of x.
-template <typename T>
-inline internal::BitEqMatcher<T> BitEq(T const& x) {
- return internal::BitEqMatcher<T>(x);
+// is equal to that of {x}.
+MATCHER_P(BitEq, x, std::string(negation ? "isn't" : "is") +
+ " bitwise equal to " + PrintToString(x)) {
+ static_assert(sizeof(x) == sizeof(arg), "Size mismatch");
+ return std::memcmp(&arg, &x, sizeof(x)) == 0;
}
diff --git a/deps/v8/testing/gtest-support.h b/deps/v8/testing/gtest-support.h
index 04daa55edc..ba0e2f41f9 100644
--- a/deps/v8/testing/gtest-support.h
+++ b/deps/v8/testing/gtest-support.h
@@ -5,7 +5,6 @@
#ifndef V8_TESTING_GTEST_SUPPORT_H_
#define V8_TESTING_GTEST_SUPPORT_H_
-#include <stddef.h>
#include "testing/gtest/include/gtest/gtest.h"
namespace testing {
@@ -32,16 +31,14 @@ GET_TYPE_NAME(double)
#undef GET_TYPE_NAME
-// TRACED_FOREACH(type, var, array) expands to a loop that assigns |var| every
-// item in the |array| and adds a SCOPED_TRACE() message for the |var| while
-// inside the loop body.
-// TODO(bmeurer): Migrate to C++11 once we're ready.
-#define TRACED_FOREACH(_type, _var, _array) \
- for (size_t _i = 0; _i < arraysize(_array); ++_i) \
- for (bool _done = false; !_done;) \
- for (_type const _var = _array[_i]; !_done;) \
- for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
- !_done; _done = true)
+// TRACED_FOREACH(type, var, container) expands to a loop that assigns |var|
+// every item in the |container| and adds a SCOPED_TRACE() message for the
+// |var| while inside the loop body.
+#define TRACED_FOREACH(_type, _var, _container) \
+ for (_type const _var : _container) \
+ for (bool _done = false; !_done;) \
+ for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
+ !_done; _done = true)
// TRACED_FORRANGE(type, var, low, high) expands to a loop that assigns |var|
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index d287f7b912..9739684629 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -34,6 +34,9 @@ local FLAGS = {
-- Do not build gcsuspects file and reuse previously generated one.
reuse_gcsuspects = false;
+ -- Don't use parallel python runner.
+ sequential = false;
+
-- Print commands to console before executing them.
verbose = false;
@@ -90,7 +93,8 @@ if not CLANG_PLUGINS or CLANG_PLUGINS == "" then
CLANG_PLUGINS = DIR
end
-local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
+local function MakeClangCommandLine(
+ plugin, plugin_args, triple, arch_define, arch_options)
if plugin_args then
for i = 1, #plugin_args do
plugin_args[i] = "-Xclang -plugin-arg-" .. plugin
@@ -109,21 +113,69 @@ local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
.. " -I./"
.. " -Ithird_party/icu/source/common"
.. " -Ithird_party/icu/source/i18n"
+ .. " " .. arch_options
+end
+
+local function IterTable(t)
+ return coroutine.wrap(function ()
+ for i, v in ipairs(t) do
+ coroutine.yield(v)
+ end
+ end)
+end
+
+local function SplitResults(lines, func)
+ -- Splits the output of parallel.py and calls func on each result.
+ -- Bails out in case of an error in one of the executions.
+ local current = {}
+ local filename = ""
+ for line in lines do
+ local new_file = line:match "^______________ (.*)$"
+ local code = line:match "^______________ finish (%d+) ______________$"
+ if code then
+ if tonumber(code) > 0 then
+ log(table.concat(current, "\n"))
+ log("Failed to examine " .. filename)
+ return false
+ end
+ log("-- %s", filename)
+ func(filename, IterTable(current))
+ elseif new_file then
+ filename = new_file
+ current = {}
+ else
+ table.insert(current, line)
+ end
+ end
+ return true
end
function InvokeClangPluginForEachFile(filenames, cfg, func)
local cmd_line = MakeClangCommandLine(cfg.plugin,
cfg.plugin_args,
cfg.triple,
- cfg.arch_define)
- for _, filename in ipairs(filenames) do
- log("-- %s", filename)
- local action = cmd_line .. " " .. filename .. " 2>&1"
+ cfg.arch_define,
+ cfg.arch_options)
+ if FLAGS.sequential then
+ log("** Sequential execution.")
+ for _, filename in ipairs(filenames) do
+ log("-- %s", filename)
+ local action = cmd_line .. " " .. filename .. " 2>&1"
+ if FLAGS.verbose then print('popen ', action) end
+ local pipe = io.popen(action)
+ func(filename, pipe:lines())
+ local success = pipe:close()
+ if not success then error("Failed to run: " .. action) end
+ end
+ else
+ log("** Parallel execution.")
+ local action = "python tools/gcmole/parallel.py \""
+ .. cmd_line .. "\" " .. table.concat(filenames, " ")
if FLAGS.verbose then print('popen ', action) end
local pipe = io.popen(action)
- func(filename, pipe:lines())
- local success = pipe:close()
- if not success then error("Failed to run: " .. action) end
+ local success = SplitResults(pipe:lines(), func)
+ local closed = pipe:close()
+ if not (success and closed) then error("Failed to run: " .. action) end
end
end
@@ -201,13 +253,17 @@ end
local ARCHITECTURES = {
ia32 = config { triple = "i586-unknown-linux",
- arch_define = "V8_TARGET_ARCH_IA32" },
+ arch_define = "V8_TARGET_ARCH_IA32",
+ arch_options = "-m32" },
arm = config { triple = "i586-unknown-linux",
- arch_define = "V8_TARGET_ARCH_ARM" },
+ arch_define = "V8_TARGET_ARCH_ARM",
+ arch_options = "-m32" },
x64 = config { triple = "x86_64-unknown-linux",
- arch_define = "V8_TARGET_ARCH_X64" },
+ arch_define = "V8_TARGET_ARCH_X64",
+ arch_options = "" },
arm64 = config { triple = "x86_64-unknown-linux",
- arch_define = "V8_TARGET_ARCH_ARM64" },
+ arch_define = "V8_TARGET_ARCH_ARM64",
+ arch_options = "" },
}
-------------------------------------------------------------------------------
diff --git a/deps/v8/tools/gcmole/parallel.py b/deps/v8/tools/gcmole/parallel.py
new file mode 100755
index 0000000000..0c045f423c
--- /dev/null
+++ b/deps/v8/tools/gcmole/parallel.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This script calls the first argument for each of the following arguments in
+parallel. E.g.
+parallel.py "clang --opt" file1 file2
+calls
+clang --opt file1
+clang --opt file2
+
+The output (stdout and stderr) is concatenated sequentially in the form:
+______________ file1
+<output of clang --opt file1>
+______________ finish <exit code of clang --opt file1> ______________
+______________ file2
+<output of clang --opt file2>
+______________ finish <exit code of clang --opt file2> ______________
+"""
+
+import itertools
+import multiprocessing
+import subprocess
+import sys
+
+def invoke(cmdline):
+ try:
+ return (subprocess.check_output(
+ cmdline, shell=True, stderr=subprocess.STDOUT), 0)
+ except subprocess.CalledProcessError as e:
+ return (e.output, e.returncode)
+
+if __name__ == '__main__':
+ assert len(sys.argv) > 2
+ processes = multiprocessing.cpu_count()
+ pool = multiprocessing.Pool(processes=processes)
+ cmdlines = ["%s %s" % (sys.argv[1], filename) for filename in sys.argv[2:]]
+ for filename, result in itertools.izip(
+ sys.argv[2:], pool.imap(invoke, cmdlines)):
+ print "______________ %s" % filename
+ print result[0]
+ print "______________ finish %d ______________" % result[1]
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 2177ec2122..8986a91b5c 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -434,6 +434,12 @@ MINIDUMP_MEMORY_LIST = Descriptor([
("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
])
+MINIDUMP_MEMORY_LIST_Mac = Descriptor([
+ ("range_count", ctypes.c_uint32),
+ ("junk", ctypes.c_uint32),
+ ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
+])
+
MINIDUMP_MEMORY_LIST64 = Descriptor([
("range_count", ctypes.c_uint64),
("base_rva", ctypes.c_uint64),
@@ -455,6 +461,12 @@ MINIDUMP_THREAD_LIST = Descriptor([
("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
])
+MINIDUMP_THREAD_LIST_Mac = Descriptor([
+ ("thread_count", ctypes.c_uint32),
+ ("junk", ctypes.c_uint32),
+ ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
+])
+
MINIDUMP_VS_FIXEDFILEINFO = Descriptor([
("dwSignature", ctypes.c_uint32),
("dwStrucVersion", ctypes.c_uint32),
@@ -489,6 +501,12 @@ MINIDUMP_MODULE_LIST = Descriptor([
("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
])
+MINIDUMP_MODULE_LIST_Mac = Descriptor([
+ ("number_of_modules", ctypes.c_uint32),
+ ("junk", ctypes.c_uint32),
+ ("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
+])
+
MINIDUMP_RAW_SYSTEM_INFO = Descriptor([
("processor_architecture", ctypes.c_uint16)
])
@@ -570,6 +588,9 @@ class MinidumpReader(object):
DebugPrint(self.exception_context)
elif d.stream_type == MD_THREAD_LIST_STREAM:
thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva)
+ if ctypes.sizeof(thread_list) + 4 == d.location.data_size:
+ thread_list = MINIDUMP_THREAD_LIST_Mac.Read(
+ self.minidump, d.location.rva)
assert ctypes.sizeof(thread_list) == d.location.data_size
DebugPrint(thread_list)
for thread in thread_list.threads:
@@ -579,12 +600,19 @@ class MinidumpReader(object):
assert self.module_list is None
self.module_list = MINIDUMP_MODULE_LIST.Read(
self.minidump, d.location.rva)
+ if ctypes.sizeof(self.module_list) + 4 == d.location.data_size:
+ self.module_list = MINIDUMP_MODULE_LIST_Mac.Read(
+ self.minidump, d.location.rva)
assert ctypes.sizeof(self.module_list) == d.location.data_size
+ DebugPrint(self.module_list)
elif d.stream_type == MD_MEMORY_LIST_STREAM:
print >>sys.stderr, "Warning: This is not a full minidump!"
assert self.memory_list is None
self.memory_list = MINIDUMP_MEMORY_LIST.Read(
self.minidump, d.location.rva)
+ if ctypes.sizeof(self.memory_list) + 4 == d.location.data_size:
+ self.memory_list = MINIDUMP_MEMORY_LIST_Mac.Read(
+ self.minidump, d.location.rva)
assert ctypes.sizeof(self.memory_list) == d.location.data_size
DebugPrint(self.memory_list)
elif d.stream_type == MD_MEMORY_64_LIST_STREAM:
@@ -942,8 +970,11 @@ class HeapObject(object):
p.Print(str(self))
def __str__(self):
+ instance_type = "???"
+ if self.map is not None:
+ instance_type = INSTANCE_TYPES[self.map.instance_type]
return "HeapObject(%s, %s)" % (self.heap.reader.FormatIntPtr(self.address),
- INSTANCE_TYPES[self.map.instance_type])
+ instance_type)
def ObjectField(self, offset):
field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
@@ -1358,9 +1389,9 @@ class JSFunction(HeapObject):
def __str__(self):
inferred_name = ""
- if self.shared.Is(SharedFunctionInfo):
+ if self.shared is not None and self.shared.Is(SharedFunctionInfo):
inferred_name = self.shared.inferred_name
- return "JSFunction(%s, %s)" % \
+ return "JSFunction(%s, %s) " % \
(self.heap.reader.FormatIntPtr(self.address), inferred_name)
def _GetSource(self):
@@ -2066,7 +2097,7 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
self.send_error(404, 'Web parameter error: %s' % e.message)
-HTML_REG_FORMAT = "<span class=\"register\"><b>%s</b>:&nbsp;%s</span>\n"
+HTML_REG_FORMAT = "<span class=\"register\"><b>%s</b>:&nbsp;%s</span><br/>\n"
class InspectionWebFormatter(object):
@@ -2235,7 +2266,7 @@ class InspectionWebFormatter(object):
f.write("<h3>Exception context</h3>")
f.write('<div class="code">\n')
f.write("Thread id: %d" % exception_thread.id)
- f.write("&nbsp;&nbsp; Exception code: %08X\n" %
+ f.write("&nbsp;&nbsp; Exception code: %08X<br/>\n" %
self.reader.exception.exception.code)
if details == InspectionWebFormatter.CONTEXT_FULL:
if self.reader.exception.exception.parameter_count > 0:
@@ -2807,16 +2838,20 @@ class InspectionShell(cmd.Cmd):
else:
print "%s\n" % string
- def do_dd(self, address):
+ def do_dd(self, args):
"""
- Interpret memory at the given address (if available) as a sequence
- of words. Automatic alignment is not performed.
+ Interpret memory in the given region [address, address + num * word_size)
+ (if available) as a sequence of words. Automatic alignment is not performed.
+ If the num is not specified, a default value of 16 words is used.
+ Synopsis: dd 0x<address> 0x<num>
"""
- start = int(address, 16)
+ args = args.split(' ')
+ start = int(args[0], 16)
+ num = int(args[1], 16) if len(args) > 1 else 0x10
if (start & self.heap.ObjectAlignmentMask()) != 0:
print "Warning: Dumping un-aligned memory, is this what you had in mind?"
for slot in xrange(start,
- start + self.reader.PointerSize() * 10,
+ start + self.reader.PointerSize() * num,
self.reader.PointerSize()):
if not self.reader.IsValidAddress(slot):
print "Address is not contained within the minidump!"
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 696434daf8..7f08ee2f86 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -30,6 +30,8 @@
'icu_use_data_file_flag%': 0,
'v8_code': 1,
'v8_random_seed%': 314159265,
+ 'embed_script%': "",
+ 'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
},
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
@@ -169,7 +171,8 @@
{
'action_name': 'run_mksnapshot',
'inputs': [
- '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+ '<(mksnapshot_exec)',
+ '<(embed_script)',
],
'outputs': [
'<(INTERMEDIATE_DIR)/snapshot.cc',
@@ -186,9 +189,10 @@
],
},
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc'
+ '<@(INTERMEDIATE_DIR)/snapshot.cc',
+ '<(embed_script)',
],
},
],
@@ -270,7 +274,7 @@
{
'action_name': 'run_mksnapshot (external)',
'inputs': [
- '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+ '<(mksnapshot_exec)',
],
'variables': {
'mksnapshot_flags': [
@@ -292,10 +296,11 @@
'<(PRODUCT_DIR)/snapshot_blob_host.bin',
],
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
'<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ '<(embed_script)',
],
}, {
'outputs': [
@@ -303,10 +308,11 @@
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
'<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
],
}],
],
@@ -316,10 +322,11 @@
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
'<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
],
}],
],
@@ -351,14 +358,14 @@
'../../src/allocation-tracker.h',
'../../src/api.cc',
'../../src/api.h',
+ '../../src/api-natives.cc',
+ '../../src/api-natives.h',
'../../src/arguments.cc',
'../../src/arguments.h',
'../../src/assembler.cc',
'../../src/assembler.h',
'../../src/assert-scope.h',
'../../src/assert-scope.cc',
- '../../src/ast-this-access-visitor.cc',
- '../../src/ast-this-access-visitor.h',
'../../src/ast-value-factory.cc',
'../../src/ast-value-factory.h',
'../../src/ast-numbering.cc',
@@ -405,6 +412,8 @@
'../../src/compilation-statistics.h',
'../../src/compiler/access-builder.cc',
'../../src/compiler/access-builder.h',
+ '../../src/compiler/all-nodes.cc',
+ '../../src/compiler/all-nodes.h',
'../../src/compiler/ast-graph-builder.cc',
'../../src/compiler/ast-graph-builder.h',
'../../src/compiler/ast-loop-assignment-analyzer.cc',
@@ -425,6 +434,8 @@
'../../src/compiler/control-builders.cc',
'../../src/compiler/control-builders.h',
'../../src/compiler/control-equivalence.h',
+ '../../src/compiler/control-flow-optimizer.cc',
+ '../../src/compiler/control-flow-optimizer.h',
'../../src/compiler/control-reducer.cc',
'../../src/compiler/control-reducer.h',
'../../src/compiler/diamond.h',
@@ -432,7 +443,6 @@
'../../src/compiler/gap-resolver.cc',
'../../src/compiler/gap-resolver.h',
'../../src/compiler/generic-algorithm.h',
- '../../src/compiler/graph-builder.cc',
'../../src/compiler/graph-builder.h',
'../../src/compiler/graph-inl.h',
'../../src/compiler/graph-reducer.cc',
@@ -459,8 +469,8 @@
'../../src/compiler/js-graph.h',
'../../src/compiler/js-inlining.cc',
'../../src/compiler/js-inlining.h',
- '../../src/compiler/js-intrinsic-builder.cc',
- '../../src/compiler/js-intrinsic-builder.h',
+ '../../src/compiler/js-intrinsic-lowering.cc',
+ '../../src/compiler/js-intrinsic-lowering.h',
'../../src/compiler/js-operator.cc',
'../../src/compiler/js-operator.h',
'../../src/compiler/js-typed-lowering.cc',
@@ -474,6 +484,8 @@
'../../src/compiler/load-elimination.h',
'../../src/compiler/loop-analysis.cc',
'../../src/compiler/loop-analysis.h',
+ '../../src/compiler/loop-peeling.cc',
+ '../../src/compiler/loop-peeling.h',
'../../src/compiler/machine-operator-reducer.cc',
'../../src/compiler/machine-operator-reducer.h',
'../../src/compiler/machine-operator.cc',
@@ -482,12 +494,13 @@
'../../src/compiler/machine-type.h',
'../../src/compiler/move-optimizer.cc',
'../../src/compiler/move-optimizer.h',
- '../../src/compiler/node-aux-data-inl.h',
'../../src/compiler/node-aux-data.h',
'../../src/compiler/node-cache.cc',
'../../src/compiler/node-cache.h',
+ '../../src/compiler/node-marker.cc',
+ '../../src/compiler/node-marker.h',
'../../src/compiler/node-matchers.h',
- '../../src/compiler/node-properties-inl.h',
+ '../../src/compiler/node-properties.cc',
'../../src/compiler/node-properties.h',
'../../src/compiler/node.cc',
'../../src/compiler/node.h',
@@ -497,6 +510,8 @@
'../../src/compiler/operator-properties.h',
'../../src/compiler/operator.cc',
'../../src/compiler/operator.h',
+ '../../src/compiler/osr.cc',
+ '../../src/compiler/osr.h',
'../../src/compiler/pipeline.cc',
'../../src/compiler/pipeline.h',
'../../src/compiler/pipeline-statistics.cc',
@@ -701,8 +716,6 @@
'../../src/ic/ic.h',
'../../src/ic/ic-compiler.cc',
'../../src/ic/ic-compiler.h',
- '../../src/interface.cc',
- '../../src/interface.h',
'../../src/interface-descriptors.cc',
'../../src/interface-descriptors.h',
'../../src/interpreter-irregexp.cc',
@@ -740,6 +753,8 @@
'../../src/macro-assembler.h',
'../../src/messages.cc',
'../../src/messages.h',
+ '../../src/modules.cc',
+ '../../src/modules.h',
'../../src/msan.h',
'../../src/natives.h',
'../../src/objects-debug.cc',
@@ -782,7 +797,6 @@
'../../src/rewriter.h',
'../../src/runtime-profiler.cc',
'../../src/runtime-profiler.h',
- '../../src/runtime/runtime-api.cc',
'../../src/runtime/runtime-array.cc',
'../../src/runtime/runtime-classes.cc',
'../../src/runtime/runtime-collections.cc',
@@ -879,7 +893,6 @@
'../../src/version.h',
'../../src/vm-state-inl.h',
'../../src/vm-state.h',
- '../../src/zone-inl.h',
'../../src/zone.cc',
'../../src/zone.h',
'../../src/third_party/fdlibm/fdlibm.cc',
@@ -1192,6 +1205,49 @@
'../../src/compiler/x64/linkage-x64.cc',
],
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ '../../src/ppc/assembler-ppc-inl.h',
+ '../../src/ppc/assembler-ppc.cc',
+ '../../src/ppc/assembler-ppc.h',
+ '../../src/ppc/builtins-ppc.cc',
+ '../../src/ppc/code-stubs-ppc.cc',
+ '../../src/ppc/code-stubs-ppc.h',
+ '../../src/ppc/codegen-ppc.cc',
+ '../../src/ppc/codegen-ppc.h',
+ '../../src/ppc/constants-ppc.h',
+ '../../src/ppc/constants-ppc.cc',
+ '../../src/ppc/cpu-ppc.cc',
+ '../../src/ppc/debug-ppc.cc',
+ '../../src/ppc/deoptimizer-ppc.cc',
+ '../../src/ppc/disasm-ppc.cc',
+ '../../src/ppc/frames-ppc.cc',
+ '../../src/ppc/frames-ppc.h',
+ '../../src/ppc/full-codegen-ppc.cc',
+ '../../src/ppc/interface-descriptors-ppc.cc',
+ '../../src/ppc/interface-descriptors-ppc.h',
+ '../../src/ppc/lithium-ppc.cc',
+ '../../src/ppc/lithium-ppc.h',
+ '../../src/ppc/lithium-codegen-ppc.cc',
+ '../../src/ppc/lithium-codegen-ppc.h',
+ '../../src/ppc/lithium-gap-resolver-ppc.cc',
+ '../../src/ppc/lithium-gap-resolver-ppc.h',
+ '../../src/ppc/macro-assembler-ppc.cc',
+ '../../src/ppc/macro-assembler-ppc.h',
+ '../../src/ppc/regexp-macro-assembler-ppc.cc',
+ '../../src/ppc/regexp-macro-assembler-ppc.h',
+ '../../src/ppc/simulator-ppc.cc',
+ '../../src/compiler/ppc/code-generator-ppc.cc',
+ '../../src/compiler/ppc/instruction-codes-ppc.h',
+ '../../src/compiler/ppc/instruction-selector-ppc.cc',
+ '../../src/compiler/ppc/linkage-ppc.cc',
+ '../../src/ic/ppc/access-compiler-ppc.cc',
+ '../../src/ic/ppc/handler-compiler-ppc.cc',
+ '../../src/ic/ppc/ic-ppc.cc',
+ '../../src/ic/ppc/ic-compiler-ppc.cc',
+ '../../src/ic/ppc/stub-cache-ppc.cc',
+ ],
+ }],
['OS=="win"', {
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
@@ -1254,6 +1310,7 @@
'../../src/base/atomicops_internals_atomicword_compat.h',
'../../src/base/atomicops_internals_mac.h',
'../../src/base/atomicops_internals_mips_gcc.h',
+ '../../src/base/atomicops_internals_ppc_gcc.h',
'../../src/base/atomicops_internals_tsan.h',
'../../src/base/atomicops_internals_x86_gcc.cc',
'../../src/base/atomicops_internals_x86_gcc.h',
@@ -1439,6 +1496,12 @@
],
}
],
+ ['OS=="aix"', {
+ 'sources': [
+ '../../src/base/platform/platform-aix.cc',
+ '../../src/base/platform/platform-posix.cc'
+ ]},
+ ],
['OS=="solaris"', {
'link_settings': {
'libraries': [
@@ -1614,7 +1677,6 @@
'../../src/uri.js',
'../../src/third_party/fdlibm/fdlibm.js',
'../../src/math.js',
- '../../src/apinatives.js',
'../../src/date.js',
'../../src/regexp.js',
'../../src/arraybuffer.js',
@@ -1643,7 +1705,6 @@
'../../src/harmony-array-includes.js',
'../../src/harmony-tostring.js',
'../../src/harmony-typedarray.js',
- '../../src/harmony-classes.js',
'../../src/harmony-templates.js',
'../../src/harmony-regexp.js'
],
diff --git a/deps/v8/tools/logreader.js b/deps/v8/tools/logreader.js
index 5f0ec7f6c7..ceac2b8268 100644
--- a/deps/v8/tools/logreader.js
+++ b/deps/v8/tools/logreader.js
@@ -35,15 +35,21 @@
*
* @param {Array.<Object>} dispatchTable A table used for parsing and processing
* log records.
+ * @param {boolean} timedRange Ignore ticks outside timed range.
* @constructor
*/
-function LogReader(dispatchTable) {
+function LogReader(dispatchTable, timedRange) {
/**
* @type {Array.<Object>}
*/
this.dispatchTable_ = dispatchTable;
/**
+ * @type {boolean}
+ */
+ this.timedRange_ = timedRange;
+
+ /**
* Current line.
* @type {number}
*/
@@ -54,6 +60,18 @@ function LogReader(dispatchTable) {
* @type {CsvParser}
*/
this.csvParser_ = new CsvParser();
+
+ /**
+ * Keeps track of whether we've seen a "current-time" tick yet.
+ * @type {boolean}
+ */
+ this.hasSeenTimerMarker_ = false;
+
+ /**
+ * List of log lines seen since last "current-time" tick.
+ * @type {Array.<String>}
+ */
+ this.logLinesSinceLastTimerMarker_ = [];
};
@@ -83,7 +101,24 @@ LogReader.prototype.processLogChunk = function(chunk) {
* @param {string} line A line of log.
*/
LogReader.prototype.processLogLine = function(line) {
- this.processLog_([line]);
+ if (!this.timedRange_) {
+ this.processLog_([line]);
+ return;
+ }
+ if (line.startsWith("current-time")) {
+ if (this.hasSeenTimerMarker_) {
+ this.processLog_(this.logLinesSinceLastTimerMarker_);
+ this.logLinesSinceLastTimerMarker_ = [];
+ } else {
+ this.hasSeenTimerMarker_ = true;
+ }
+ } else {
+ if (this.hasSeenTimerMarker_) {
+ this.logLinesSinceLastTimerMarker_.push(line);
+ } else if (!line.startsWith("tick")) {
+ this.processLog_([line]);
+ }
+ }
};
diff --git a/deps/v8/tools/ninja/ninja_output.py b/deps/v8/tools/ninja/ninja_output.py
new file mode 100644
index 0000000000..ec4d27e097
--- /dev/null
+++ b/deps/v8/tools/ninja/ninja_output.py
@@ -0,0 +1,44 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import os
+import os.path
+
+
+def GetNinjaOutputDirectory(v8_root, configuration=None):
+ """Returns <v8_root>/<output_dir>/(Release|Debug).
+
+ The configuration chosen is the one most recently generated/built, but can be
+ overriden via the <configuration> parameter. Detects a custom output_dir
+ specified by GYP_GENERATOR_FLAGS."""
+
+ output_dir = 'out'
+ generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ')
+ for flag in generator_flags:
+ name_value = flag.split('=', 1)
+ if len(name_value) == 2 and name_value[0] == 'output_dir':
+ output_dir = name_value[1]
+
+ root = os.path.join(v8_root, output_dir)
+ if configuration:
+ return os.path.join(root, configuration)
+
+ debug_path = os.path.join(root, 'Debug')
+ release_path = os.path.join(root, 'Release')
+
+ def is_release_newer(test_path):
+ try:
+ debug_mtime = os.path.getmtime(os.path.join(debug_path, test_path))
+ except os.error:
+ debug_mtime = 0
+ try:
+ rel_mtime = os.path.getmtime(os.path.join(release_path, test_path))
+ except os.error:
+ rel_mtime = 0
+ return rel_mtime >= debug_mtime
+
+ if is_release_newer('.ninja_log') or is_release_newer('.ninja_deps'):
+ return release_path
+ return debug_path
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index 2cafc838e1..3e41bf94bb 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -95,7 +95,7 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise we won't produce cached data.
- bool success = Parser::Parse(&info, true);
+ bool success = Parser::ParseStatic(&info, true);
parse_time1 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
@@ -111,7 +111,7 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise cached data won't help.
- bool success = Parser::Parse(&info, true);
+ bool success = Parser::ParseStatic(&info, true);
parse_time2 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 321d2910d5..97b3b597d1 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -438,6 +438,33 @@ def CheckExternalReferenceRegistration(workspace):
[sys.executable, join(workspace, "tools", "external-reference-check.py")])
return code == 0
+def CheckAuthorizedAuthor(input_api, output_api):
+ """For non-googler/chromites committers, verify the author's email address is
+ in AUTHORS.
+ """
+ # TODO(maruel): Add it to input_api?
+ import fnmatch
+
+ author = input_api.change.author_email
+ if not author:
+ input_api.logging.info('No author, skipping AUTHOR check')
+ return []
+ authors_path = input_api.os_path.join(
+ input_api.PresubmitLocalPath(), 'AUTHORS')
+ valid_authors = (
+ input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
+ for line in open(authors_path))
+ valid_authors = [item.group(1).lower() for item in valid_authors if item]
+ if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
+ input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
+ return [output_api.PresubmitPromptWarning(
+ ('%s is not in AUTHORS file. If you are a new contributor, please visit'
+ '\n'
+ 'http://www.chromium.org/developers/contributing-code and read the '
+ '"Legal" section\n'
+ 'If you are a chromite, verify the contributor signed the CLA.') %
+ author)]
+ return []
def GetOptions():
result = optparse.OptionParser()
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js
index a06cd3a5ff..f0814a2f0d 100644
--- a/deps/v8/tools/profile.js
+++ b/deps/v8/tools/profile.js
@@ -257,26 +257,28 @@ Profile.prototype.resolveAndFilterFuncs_ = function(stack) {
var entry = this.codeMap_.findEntry(stack[i]);
if (entry) {
var name = entry.getName();
- if (i == 0 && (entry.type == 'CPP' || entry.type == 'SHARED_LIB')) {
+ if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
look_for_first_c_function = true;
}
- if (look_for_first_c_function) {
- if (entry.type == 'CPP') {
- last_seen_c_function = name;
- } else if (i > 0 && last_seen_c_function != '') {
- if (this.c_entries_[last_seen_c_function] === undefined) {
- this.c_entries_[last_seen_c_function] = 0;
- }
- this.c_entries_[last_seen_c_function]++;
- look_for_first_c_function = false; // Found it, we're done.
- }
+ if (look_for_first_c_function && entry.type === 'CPP') {
+ last_seen_c_function = name;
}
if (!this.skipThisFunction(name)) {
result.push(name);
}
} else {
- this.handleUnknownCode(
- Profile.Operation.TICK, stack[i], i);
+ this.handleUnknownCode(Profile.Operation.TICK, stack[i], i);
+ if (i === 0) result.push("UNKNOWN");
+ }
+ if (look_for_first_c_function &&
+ i > 0 &&
+ (!entry || entry.type !== 'CPP') &&
+ last_seen_c_function !== '') {
+ if (this.c_entries_[last_seen_c_function] === undefined) {
+ this.c_entries_[last_seen_c_function] = 0;
+ }
+ this.c_entries_[last_seen_c_function]++;
+ look_for_first_c_function = false; // Found it, we're done.
}
}
return result;
diff --git a/deps/v8/tools/push-to-trunk/bump_up_version.py b/deps/v8/tools/push-to-trunk/bump_up_version.py
deleted file mode 100755
index 647708c0c7..0000000000
--- a/deps/v8/tools/push-to-trunk/bump_up_version.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Script for auto-increasing the version on bleeding_edge.
-
-The script can be run regularly by a cron job. It will increase the build
-level of the version on bleeding_edge if:
-- the lkgr version is smaller than the version of the latest revision,
-- the lkgr version is not a version change itself,
-- the tree is not closed for maintenance.
-
-The new version will be the maximum of the bleeding_edge and trunk versions +1.
-E.g. latest bleeding_edge version: 3.22.11.0 and latest trunk 3.23.0.0 gives
-the new version 3.23.1.0.
-
-This script requires a depot tools git checkout. I.e. 'fetch v8'.
-"""
-
-import argparse
-import os
-import sys
-
-from common_includes import *
-
-VERSION_BRANCH = "auto-bump-up-version"
-
-
-# TODO(machenbach): Add vc interface that works on git mirror.
-class Preparation(Step):
- MESSAGE = "Preparation."
-
- def RunStep(self):
- # TODO(machenbach): Remove after the git switch.
- if(self.Config("PERSISTFILE_BASENAME") ==
- "/tmp/v8-bump-up-version-tempfile"):
- print "This script is disabled until after the v8 git migration."
- return True
-
- # Check for a clean workdir.
- if not self.GitIsWorkdirClean(): # pragma: no cover
- # This is in case a developer runs this script on a dirty tree.
- self.GitStash()
-
- self.GitCheckout("master")
-
- self.GitPull()
-
- # Ensure a clean version branch.
- self.DeleteBranch(VERSION_BRANCH)
-
-
-class GetCurrentBleedingEdgeVersion(Step):
- MESSAGE = "Get latest bleeding edge version."
-
- def RunStep(self):
- self.GitCheckout("master")
-
- # Store latest version and revision.
- self.ReadAndPersistVersion()
- self["latest_version"] = self.ArrayToVersion("")
- self["latest"] = self.GitLog(n=1, format="%H")
- print "Bleeding edge version: %s" % self["latest_version"]
-
-
-# This step is pure paranoia. It forbids the script to continue if the last
-# commit changed version.cc. Just in case the other bailout has a bug, this
-# prevents the script from continuously commiting version changes.
-class LastChangeBailout(Step):
- MESSAGE = "Stop script if the last change modified the version."
-
- def RunStep(self):
- if VERSION_FILE in self.GitChangedFiles(self["latest"]):
- print "Stop due to recent version change."
- return True
-
-
-# TODO(machenbach): Implement this for git.
-class FetchLKGR(Step):
- MESSAGE = "Fetching V8 LKGR."
-
- def RunStep(self):
- lkgr_url = "https://v8-status.appspot.com/lkgr"
- self["lkgr_svn"] = self.ReadURL(lkgr_url, wait_plan=[5])
-
-
-# TODO(machenbach): Implement this for git. With a git lkgr we could simply
-# checkout that revision. With svn, we have to search backwards until that
-# revision is found.
-class GetLKGRVersion(Step):
- MESSAGE = "Get bleeding edge lkgr version."
-
- def RunStep(self):
- self.GitCheckout("master")
- # If the commit was made from svn, there is a mapping entry in the commit
- # message.
- self["lkgr"] = self.GitLog(
- grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" % self["lkgr_svn"],
- format="%H")
-
- # FIXME(machenbach): http://crbug.com/391712 can lead to svn lkgrs on the
- # trunk branch (rarely).
- if not self["lkgr"]: # pragma: no cover
- self.Die("No git hash found for svn lkgr.")
-
- self.GitCreateBranch(VERSION_BRANCH, self["lkgr"])
- self.ReadAndPersistVersion("lkgr_")
- self["lkgr_version"] = self.ArrayToVersion("lkgr_")
- print "LKGR version: %s" % self["lkgr_version"]
-
- # Ensure a clean version branch.
- self.GitCheckout("master")
- self.DeleteBranch(VERSION_BRANCH)
-
-
-class LKGRVersionUpToDateBailout(Step):
- MESSAGE = "Stop script if the lkgr has a renewed version."
-
- def RunStep(self):
- # If a version-change commit becomes the lkgr, don't bump up the version
- # again.
- if VERSION_FILE in self.GitChangedFiles(self["lkgr"]):
- print "Stop because the lkgr is a version change itself."
- return True
-
- # Don't bump up the version if it got updated already after the lkgr.
- if SortingKey(self["lkgr_version"]) < SortingKey(self["latest_version"]):
- print("Stop because the latest version already changed since the lkgr "
- "version.")
- return True
-
-
-class GetTrunkVersion(Step):
- MESSAGE = "Get latest trunk version."
-
- def RunStep(self):
- self.GitCheckout("candidates")
- self.GitPull()
- self.ReadAndPersistVersion("trunk_")
- self["trunk_version"] = self.ArrayToVersion("trunk_")
- print "Trunk version: %s" % self["trunk_version"]
-
-
-class CalculateVersion(Step):
- MESSAGE = "Calculate the new version."
-
- def RunStep(self):
- if self["lkgr_build"] == "9999": # pragma: no cover
- # If version control on bleeding edge was switched off, just use the last
- # trunk version.
- self["lkgr_version"] = self["trunk_version"]
-
- # The new version needs to be greater than the max on bleeding edge and
- # trunk.
- max_version = max(self["trunk_version"],
- self["lkgr_version"],
- key=SortingKey)
-
- # Strip off possible leading zeros.
- self["new_major"], self["new_minor"], self["new_build"], _ = (
- map(str, map(int, max_version.split("."))))
-
- self["new_build"] = str(int(self["new_build"]) + 1)
- self["new_patch"] = "0"
-
- self["new_version"] = ("%s.%s.%s.0" %
- (self["new_major"], self["new_minor"], self["new_build"]))
- print "New version is %s" % self["new_version"]
-
- if self._options.dry_run: # pragma: no cover
- print "Dry run, skipping version change."
- return True
-
-
-class CheckTreeStatus(Step):
- MESSAGE = "Checking v8 tree status message."
-
- def RunStep(self):
- status_url = "https://v8-status.appspot.com/current?format=json"
- status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
- message = json.loads(status_json)["message"]
- if re.search(r"maintenance|no commits", message, flags=re.I):
- print "Skip version change by tree status: \"%s\"" % message
- return True
-
-
-class ChangeVersion(Step):
- MESSAGE = "Bump up the version."
-
- def RunStep(self):
- self.GitCreateBranch(VERSION_BRANCH, "master")
-
- self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
-
- try:
- msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
- self.GitCommit("%s\n\nTBR=%s" % (msg, self._options.author),
- author=self._options.author)
- self.GitUpload(author=self._options.author,
- force=self._options.force_upload,
- bypass_hooks=True)
- self.GitCLLand()
- print "Successfully changed the version."
- finally:
- # Clean up.
- self.GitCheckout("master")
- self.DeleteBranch(VERSION_BRANCH)
-
-
-class BumpUpVersion(ScriptsBase):
- def _PrepareOptions(self, parser):
- parser.add_argument("--dry_run", help="Don't commit the new version.",
- default=False, action="store_true")
-
- def _ProcessOptions(self, options): # pragma: no cover
- if not options.dry_run and not options.author:
- print "Specify your chromium.org email with -a"
- return False
- options.wait_for_lgtm = False
- options.force_readline_defaults = True
- options.force_upload = True
- return True
-
- def _Config(self):
- return {
- "PERSISTFILE_BASENAME": "/tmp/v8-bump-up-version-tempfile",
- "PATCH_FILE": "/tmp/v8-bump-up-version-tempfile-patch-file",
- }
-
- def _Steps(self):
- return [
- Preparation,
- GetCurrentBleedingEdgeVersion,
- LastChangeBailout,
- FetchLKGR,
- GetLKGRVersion,
- LKGRVersionUpToDateBailout,
- GetTrunkVersion,
- CalculateVersion,
- CheckTreeStatus,
- ChangeVersion,
- ]
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(BumpUpVersion().Run())
diff --git a/deps/v8/tools/push-to-trunk/generate_version.py b/deps/v8/tools/push-to-trunk/generate_version.py
deleted file mode 100755
index b4a0221eae..0000000000
--- a/deps/v8/tools/push-to-trunk/generate_version.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Script to set v8's version file to the version given by the latest tag.
-"""
-
-
-import os
-import re
-import subprocess
-import sys
-
-
-CWD = os.path.abspath(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-VERSION_CC = os.path.join(CWD, "src", "version.cc")
-
-def main():
- tag = subprocess.check_output(
- "git describe --tags",
- shell=True,
- cwd=CWD,
- ).strip()
- assert tag
-
- # Check for commits not exactly matching a tag. Those are candidate builds
- # for the next version. The output has the form
- # <tag name>-<n commits>-<hash>.
- if "-" in tag:
- version = tag.split("-")[0]
- candidate = "1"
- else:
- version = tag
- candidate = "0"
- version_levels = version.split(".")
-
- # Set default patch level if none is given.
- if len(version_levels) == 3:
- version_levels.append("0")
- assert len(version_levels) == 4
-
- major, minor, build, patch = version_levels
-
- # Increment build level for candidate builds.
- if candidate == "1":
- build = str(int(build) + 1)
- patch = "0"
-
- # Modify version.cc with the new values.
- with open(VERSION_CC, "r") as f:
- text = f.read()
- output = []
- for line in text.split("\n"):
- for definition, substitute in (
- ("MAJOR_VERSION", major),
- ("MINOR_VERSION", minor),
- ("BUILD_NUMBER", build),
- ("PATCH_LEVEL", patch),
- ("IS_CANDIDATE_VERSION", candidate)):
- if line.startswith("#define %s" % definition):
- line = re.sub("\d+$", substitute, line)
- output.append(line)
- with open(VERSION_CC, "w") as f:
- f.write("\n".join(output))
-
- # Log what was done.
- candidate_txt = " (candidate)" if candidate == "1" else ""
- patch_txt = ".%s" % patch if patch != "0" else ""
- version_txt = ("%s.%s.%s%s%s" %
- (major, minor, build, patch_txt, candidate_txt))
- print "Modified version.cc. Set V8 version to %s" % version_txt
- return 0
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/deps/v8/tools/push-to-trunk/auto_push.py b/deps/v8/tools/release/auto_push.py
index 34afa4aaf8..121288f5b5 100755
--- a/deps/v8/tools/push-to-trunk/auto_push.py
+++ b/deps/v8/tools/release/auto_push.py
@@ -34,9 +34,8 @@ import sys
import urllib
from common_includes import *
-import push_to_trunk
+import push_to_candidates
-PUSH_MESSAGE_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")
class Preparation(Step):
MESSAGE = "Preparation."
@@ -46,55 +45,25 @@ class Preparation(Step):
self.CommonPrepare()
-class CheckAutoPushSettings(Step):
- MESSAGE = "Checking settings file."
-
- def RunStep(self):
- settings_file = os.path.realpath(self.Config("SETTINGS_LOCATION"))
- if os.path.exists(settings_file):
- settings_dict = json.loads(FileToText(settings_file))
- if settings_dict.get("enable_auto_roll") is False:
- self.Die("Push to trunk disabled by auto-roll settings file: %s"
- % settings_file)
-
-
-class CheckTreeStatus(Step):
- MESSAGE = "Checking v8 tree status message."
-
- def RunStep(self):
- status_url = "https://v8-status.appspot.com/current?format=json"
- status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
- self["tree_message"] = json.loads(status_json)["message"]
- if re.search(r"nopush|no push", self["tree_message"], flags=re.I):
- self.Die("Push to trunk disabled by tree state: %s"
- % self["tree_message"])
-
-
class FetchCandidate(Step):
- MESSAGE = "Fetching V8 roll candidate ref."
+ MESSAGE = "Fetching V8 roll ref."
def RunStep(self):
- self.Git("fetch origin +refs/heads/candidate:refs/heads/candidate")
- self["candidate"] = self.Git("show-ref -s refs/heads/candidate").strip()
+ # The roll ref points to the candidate to be rolled.
+ self.Git("fetch origin +refs/heads/roll:refs/heads/roll")
+ self["candidate"] = self.Git("show-ref -s refs/heads/roll").strip()
-class CheckLastPush(Step):
- MESSAGE = "Checking last V8 push to trunk."
+class LastReleaseBailout(Step):
+ MESSAGE = "Checking last V8 release base."
def RunStep(self):
- last_push = self.FindLastTrunkPush()
-
- # Retrieve the bleeding edge revision of the last push from the text in
- # the push commit message.
- last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
- last_push_be = PUSH_MESSAGE_RE.match(last_push_title).group(1)
-
- if not last_push_be: # pragma: no cover
- self.Die("Could not retrieve bleeding edge revision for trunk push %s"
- % last_push)
+ last_release = self.GetLatestReleaseBase()
+ commits = self.GitLog(
+ format="%H", git_hash="%s..%s" % (last_release, self["candidate"]))
- if self["candidate"] == last_push_be:
- print "Already pushed current candidate %s" % last_push_be
+ if not commits:
+ print "Already pushed current candidate %s" % self["candidate"]
return True
@@ -116,13 +85,14 @@ class PushToCandidates(Step):
# TODO(machenbach): Update the script before calling it.
if self._options.push:
- self._side_effect_handler.Call(push_to_trunk.PushToTrunk().Run, args)
+ self._side_effect_handler.Call(
+ push_to_candidates.PushToCandidates().Run, args)
class AutoPush(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-p", "--push",
- help="Push to trunk. Dry run if unspecified.",
+ help="Push to candidates. Dry run if unspecified.",
default=False, action="store_true")
def _ProcessOptions(self, options):
@@ -135,16 +105,13 @@ class AutoPush(ScriptsBase):
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
- "SETTINGS_LOCATION": "~/.auto-roll",
}
def _Steps(self):
return [
Preparation,
- CheckAutoPushSettings,
- CheckTreeStatus,
FetchCandidate,
- CheckLastPush,
+ LastReleaseBailout,
PushToCandidates,
]
diff --git a/deps/v8/tools/push-to-trunk/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index 1b57097cde..315a4bc2a0 100755
--- a/deps/v8/tools/push-to-trunk/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -38,31 +38,33 @@ class CheckActiveRoll(Step):
return True
-class DetectLastPush(Step):
- MESSAGE = "Detect commit ID of the last push to trunk."
-
- def RunStep(self):
- self.vc.Fetch()
- push_hash = self.FindLastTrunkPush(
- branch="origin/candidates", include_patches=True)
- self["last_push"] = self.GetCommitPositionNumber(push_hash)
-
-
class DetectLastRoll(Step):
MESSAGE = "Detect commit ID of the last Chromium roll."
def RunStep(self):
+ # The revision that should be rolled.
+ latest_release = self.GetLatestRelease()
+
# Interpret the DEPS file to retrieve the v8 revision.
# TODO(machenbach): This should be part or the roll-deps api of
# depot_tools.
Var = lambda var: '%s'
exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
- last_roll = self.GetCommitPositionNumber(vars['v8_revision'])
- # FIXME(machenbach): When rolling from bleeding edge and from trunk there
- # be different commit numbers here. Better use version?
- if int(last_roll) >= int(self["last_push"]):
+
+ # The revision rolled last.
+ self["last_roll"] = vars['v8_revision']
+
+ # TODO(machenbach): It is possible that the auto-push script made a new
+ # fast-forward release (e.g. 4.2.3) while somebody patches the last
+ # candidate (e.g. 4.2.2.1). In this case, the auto-roller would pick
+ # the fast-forward release. Should there be a way to prioritize the
+ # patched version?
+
+ if latest_release == self["last_roll"]:
+ # We always try to roll if the latest revision is not the revision in
+ # chromium.
print("There is no newer v8 revision than the one in Chromium (%s)."
- % last_roll)
+ % self["last_roll"])
return True
@@ -93,6 +95,7 @@ class RollChromium(Step):
"--author", self._options.author,
"--reviewer", self._options.reviewer,
"--chromium", self._options.chromium,
+ "--last-roll", self["last_roll"],
"--use-commit-queue",
]
if self._options.sheriff:
@@ -131,7 +134,6 @@ class AutoRoll(ScriptsBase):
def _Steps(self):
return [
CheckActiveRoll,
- DetectLastPush,
DetectLastRoll,
CheckClusterFuzz,
RollChromium,
diff --git a/deps/v8/tools/push-to-trunk/auto_tag.py b/deps/v8/tools/release/auto_tag.py
index a52a028697..a52a028697 100755
--- a/deps/v8/tools/push-to-trunk/auto_tag.py
+++ b/deps/v8/tools/release/auto_tag.py
diff --git a/deps/v8/tools/push-to-trunk/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py
index d4ba90ba48..d4ba90ba48 100755
--- a/deps/v8/tools/push-to-trunk/check_clusterfuzz.py
+++ b/deps/v8/tools/release/check_clusterfuzz.py
diff --git a/deps/v8/tools/push-to-trunk/chromium_roll.py b/deps/v8/tools/release/chromium_roll.py
index 5c9a38ecb0..8a3ff4a0a7 100755
--- a/deps/v8/tools/push-to-trunk/chromium_roll.py
+++ b/deps/v8/tools/release/chromium_roll.py
@@ -10,6 +10,10 @@ import sys
from common_includes import *
+ROLL_SUMMARY = ("Summary of changes available at:\n"
+ "https://chromium.googlesource.com/v8/v8/+log/%s..%s")
+
+
class Preparation(Step):
MESSAGE = "Preparation."
@@ -19,14 +23,28 @@ class Preparation(Step):
class DetectLastPush(Step):
- MESSAGE = "Detect commit ID of last push to trunk."
+ MESSAGE = "Detect commit ID of last release."
def RunStep(self):
- self["last_push"] = self._options.last_push or self.FindLastTrunkPush(
- branch="origin/candidates", include_patches=True)
+ # The revision that should be rolled.
+ self["last_push"] = self._options.last_push or self.GetLatestRelease()
self["push_title"] = self.GitLog(n=1, format="%s",
git_hash=self["last_push"])
+ # The master revision this release is based on.
+ self["push_base"] = self.GetLatestReleaseBase()
+
+ # FIXME(machenbach): Manually specifying a revision doesn't work at the
+ # moment. Needs more complicated logic to find the correct push_base above.
+ # Maybe delete that parameter entirely?
+ assert not self._options.last_push
+
+ # Determine the master revision of the last roll.
+ version = self.GetVersionTag(self._options.last_roll)
+ assert version
+ self["last_rolled_base"] = self.GetLatestReleaseBase(version=version)
+ assert self["last_rolled_base"]
+
class SwitchChromium(Step):
MESSAGE = "Switch to Chromium checkout."
@@ -69,13 +87,17 @@ class UploadCL(Step):
cwd=self._options.chromium) is None:
self.Die("Failed to create deps for %s" % self["last_push"])
- commit_title = "Update V8 to %s." % self["push_title"].lower()
- sheriff = ""
+ message = []
+ message.append("Update V8 to %s." % self["push_title"].lower())
+
+ message.append(
+ ROLL_SUMMARY % (self["last_rolled_base"][:8], self["push_base"][:8]))
+
if self["sheriff"]:
- sheriff = ("\n\nPlease reply to the V8 sheriff %s in case of problems."
- % self["sheriff"])
- self.GitCommit("%s%s\n\nTBR=%s" %
- (commit_title, sheriff, self._options.reviewer),
+ message.append("Please reply to the V8 sheriff %s in case of problems."
+ % self["sheriff"])
+ message.append("TBR=%s" % self._options.reviewer)
+ self.GitCommit("\n\n".join(message),
author=self._options.author,
cwd=self._options.chromium)
if not self._options.dry_run:
@@ -118,7 +140,9 @@ class ChromiumRoll(ScriptsBase):
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("-l", "--last-push",
- help="The git commit ID of the last push to trunk.")
+ help="The git commit ID of the last candidates push.")
+ parser.add_argument("--last-roll", required=True,
+ help="The git commit ID of the last rolled version.")
parser.add_argument("--use-commit-queue",
help="Check the CQ bit on upload.",
default=False, action="store_true")
diff --git a/deps/v8/tools/push-to-trunk/common_includes.py b/deps/v8/tools/release/common_includes.py
index ac78ef8d27..bae05bc6b5 100644
--- a/deps/v8/tools/push-to-trunk/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -46,7 +46,10 @@ from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
CHANGELOG_FILE = "ChangeLog"
+PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
+PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("src", "version.cc")
+VERSION_RE = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
# V8 base directory.
V8_BASE = os.path.dirname(
@@ -329,9 +332,25 @@ class GitInterface(VCInterface):
return "origin/candidates"
def RemoteBranch(self, name):
+ # Assume that if someone "fully qualified" the ref, they know what they
+ # want.
+ if name.startswith('refs/'):
+ return name
if name in ["candidates", "master"]:
- return "origin/%s" % name
- return "branch-heads/%s" % name
+ return "refs/remotes/origin/%s" % name
+ try:
+ # Check if branch is in heads.
+ if self.step.Git("show-ref refs/remotes/origin/%s" % name).strip():
+ return "refs/remotes/origin/%s" % name
+ except GitFailedException:
+ pass
+ try:
+ # Check if branch is in branch-heads.
+ if self.step.Git("show-ref refs/remotes/branch-heads/%s" % name).strip():
+ return "refs/remotes/branch-heads/%s" % name
+ except GitFailedException:
+ pass
+ self.Die("Can't find remote of %s" % name)
def Tag(self, tag, remote, message):
# Wait for the commit to appear. Assumes unique commit message titles (this
@@ -380,7 +399,7 @@ class Step(GitRecipesMixin):
def __getitem__(self, key):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
- return self._state[key]
+ return self._state.get(key)
def __setitem__(self, key, value):
# Convenience method to allow direct [] access on step classes for
@@ -589,16 +608,79 @@ class Step(GitRecipesMixin):
except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
- def FindLastTrunkPush(
- self, parent_hash="", branch="", include_patches=False):
- push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*"
- if not include_patches:
- # Non-patched versions only have three numbers followed by the "(based
- # on...) comment."
- push_pattern += " (based"
- branch = "" if parent_hash else branch or self.vc.RemoteCandidateBranch()
- return self.GitLog(n=1, format="%H", grep=push_pattern,
- parent_hash=parent_hash, branch=branch)
+ def GetVersionTag(self, revision):
+ tag = self.Git("describe --tags %s" % revision).strip()
+ if VERSION_RE.match(tag):
+ return tag
+ else:
+ return None
+
+ def GetRecentReleases(self, max_age):
+ # Make sure tags are fetched.
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
+
+ # Current timestamp.
+ time_now = int(self._side_effect_handler.GetUTCStamp())
+
+ # List every tag from a given period.
+ revisions = self.Git("rev-list --max-age=%d --tags" %
+ int(time_now - max_age)).strip()
+
+ # Filter out revisions who's tag is off by one or more commits.
+ return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
+
+ def GetLatestVersion(self):
+ # Use cached version if available.
+ if self["latest_version"]:
+ return self["latest_version"]
+
+ # Make sure tags are fetched.
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
+ version = sorted(filter(VERSION_RE.match, self.vc.GetTags()),
+ key=SortingKey, reverse=True)[0]
+ self["latest_version"] = version
+ return version
+
+ def GetLatestRelease(self):
+ """The latest release is the git hash of the latest tagged version.
+
+ This revision should be rolled into chromium.
+ """
+ latest_version = self.GetLatestVersion()
+
+ # The latest release.
+ latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
+ assert latest_hash
+ return latest_hash
+
+ def GetLatestReleaseBase(self, version=None):
+ """The latest release base is the latest revision that is covered in the
+ last change log file. It doesn't include cherry-picked patches.
+ """
+ latest_version = version or self.GetLatestVersion()
+
+ # Strip patch level if it exists.
+ latest_version = ".".join(latest_version.split(".")[:3])
+
+ # The latest release base.
+ latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
+ assert latest_hash
+
+ title = self.GitLog(n=1, format="%s", git_hash=latest_hash)
+ match = PUSH_MSG_GIT_RE.match(title)
+ if match:
+ # Legacy: In the old process there's one level of indirection. The
+ # version is on the candidates branch and points to the real release
+ # base on master through the commit message.
+ return match.group("git_rev")
+ match = PUSH_MSG_NEW_RE.match(title)
+ if match:
+ # This is a new-style v8 version branched from master. The commit
+ # "latest_hash" is the version-file change. Its parent is the release
+ # base on master.
+ return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
+
+ self.Die("Unknown latest release: %s" % latest_hash)
def ArrayToVersion(self, prefix):
return ".".join([self[prefix + "major"],
@@ -606,6 +688,16 @@ class Step(GitRecipesMixin):
self[prefix + "build"],
self[prefix + "patch"]])
+ def StoreVersion(self, version, prefix):
+ version_parts = version.split(".")
+ if len(version_parts) == 3:
+ version_parts.append("0")
+ major, minor, build, patch = version_parts
+ self[prefix + "major"] = major
+ self[prefix + "minor"] = minor
+ self[prefix + "build"] = build
+ self[prefix + "patch"] = patch
+
def SetVersion(self, version_file, prefix):
output = ""
for line in FileToText(version_file).splitlines():
@@ -617,6 +709,9 @@ class Step(GitRecipesMixin):
line = re.sub("\d+$", self[prefix + "build"], line)
elif line.startswith("#define PATCH_LEVEL"):
line = re.sub("\d+$", self[prefix + "patch"], line)
+ elif (self[prefix + "candidate"] and
+ line.startswith("#define IS_CANDIDATE_VERSION")):
+ line = re.sub("\d+$", self[prefix + "candidate"], line)
output += "%s\n" % line
TextToFile(output, version_file)
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
new file mode 100755
index 0000000000..44c10d9b30
--- /dev/null
+++ b/deps/v8/tools/release/create_release.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import sys
+import tempfile
+import urllib2
+
+from common_includes import *
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ fetchspecs = [
+ "+refs/heads/*:refs/heads/*",
+ "+refs/pending/*:refs/pending/*",
+ "+refs/pending-tags/*:refs/pending-tags/*",
+ ]
+ self.Git("fetch origin %s" % " ".join(fetchspecs))
+ self.GitCheckout("origin/master")
+ self.DeleteBranch("work-branch")
+
+
+class PrepareBranchRevision(Step):
+ MESSAGE = "Check from which revision to branch off."
+
+ def RunStep(self):
+ if self._options.revision:
+ self["push_hash"], tree_object = self.GitLog(
+ n=1, format="\"%H %T\"", git_hash=self._options.revision).split(" ")
+ else:
+ self["push_hash"], tree_object = self.GitLog(
+ n=1, format="\"%H %T\"", branch="origin/master").split(" ")
+ print "Release revision %s" % self["push_hash"]
+ assert self["push_hash"]
+
+ pending_tuples = self.GitLog(
+ n=200, format="\"%H %T\"", branch="refs/pending/heads/master")
+ for hsh, tree in map(lambda s: s.split(" "), pending_tuples.splitlines()):
+ if tree == tree_object:
+ self["pending_hash"] = hsh
+ break
+ print "Pending release revision %s" % self["pending_hash"]
+ assert self["pending_hash"]
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ latest_version = self.GetLatestVersion()
+
+ # The version file on master can be used to bump up major/minor at
+ # branch time.
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
+ self.ReadAndPersistVersion("master_")
+ master_version = self.ArrayToVersion("master_")
+
+ # Use the highest version from master or from tags to determine the new
+ # version.
+ authoritative_version = sorted(
+ [master_version, latest_version], key=SortingKey)[1]
+ self.StoreVersion(authoritative_version, "authoritative_")
+
+ # Variables prefixed with 'new_' contain the new version numbers for the
+ # ongoing candidates push.
+ self["new_major"] = self["authoritative_major"]
+ self["new_minor"] = self["authoritative_minor"]
+ self["new_build"] = str(int(self["authoritative_build"]) + 1)
+
+ # Make sure patch level is 0 in a new push.
+ self["new_patch"] = "0"
+
+ # The new version is not a candidate.
+ self["new_candidate"] = "0"
+
+ self["version"] = "%s.%s.%s" % (self["new_major"],
+ self["new_minor"],
+ self["new_build"])
+
+ print ("Incremented version to %s" % self["version"])
+
+
+class DetectLastRelease(Step):
+ MESSAGE = "Detect commit ID of last release base."
+
+ def RunStep(self):
+ self["last_push_master"] = self.GetLatestReleaseBase()
+
+
+class PrepareChangeLog(Step):
+ MESSAGE = "Prepare raw ChangeLog entry."
+
+ def Reload(self, body):
+ """Attempts to reload the commit message from rietveld in order to allow
+ late changes to the LOG flag. Note: This is brittle to future changes of
+ the web page name or structure.
+ """
+ match = re.search(r"^Review URL: https://codereview\.chromium\.org/(\d+)$",
+ body, flags=re.M)
+ if match:
+ cl_url = ("https://codereview.chromium.org/%s/description"
+ % match.group(1))
+ try:
+ # Fetch from Rietveld but only retry once with one second delay since
+ # there might be many revisions.
+ body = self.ReadURL(cl_url, wait_plan=[1])
+ except urllib2.URLError: # pragma: no cover
+ pass
+ return body
+
+ def RunStep(self):
+ self["date"] = self.GetDate()
+ output = "%s: Version %s\n\n" % (self["date"], self["version"])
+ TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
+ commits = self.GitLog(format="%H",
+ git_hash="%s..%s" % (self["last_push_master"],
+ self["push_hash"]))
+
+ # Cache raw commit messages.
+ commit_messages = [
+ [
+ self.GitLog(n=1, format="%s", git_hash=commit),
+ self.Reload(self.GitLog(n=1, format="%B", git_hash=commit)),
+ self.GitLog(n=1, format="%an", git_hash=commit),
+ ] for commit in commits.splitlines()
+ ]
+
+ # Auto-format commit messages.
+ body = MakeChangeLogBody(commit_messages, auto_format=True)
+ AppendToFile(body, self.Config("CHANGELOG_ENTRY_FILE"))
+
+ msg = (" Performance and stability improvements on all platforms."
+ "\n#\n# The change log above is auto-generated. Please review if "
+ "all relevant\n# commit messages from the list below are included."
+ "\n# All lines starting with # will be stripped.\n#\n")
+ AppendToFile(msg, self.Config("CHANGELOG_ENTRY_FILE"))
+
+ # Include unformatted commit messages as a reference in a comment.
+ comment_body = MakeComment(MakeChangeLogBody(commit_messages))
+ AppendToFile(comment_body, self.Config("CHANGELOG_ENTRY_FILE"))
+
+
+class EditChangeLog(Step):
+ MESSAGE = "Edit ChangeLog entry."
+
+ def RunStep(self):
+ print ("Please press <Return> to have your EDITOR open the ChangeLog "
+ "entry, then edit its contents to your liking. When you're done, "
+ "save the file and exit your EDITOR. ")
+ self.ReadLine(default="")
+ self.Editor(self.Config("CHANGELOG_ENTRY_FILE"))
+
+ # Strip comments and reformat with correct indentation.
+ changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")).rstrip()
+ changelog_entry = StripComments(changelog_entry)
+ changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
+ changelog_entry = changelog_entry.lstrip()
+
+ if changelog_entry == "": # pragma: no cover
+ self.Die("Empty ChangeLog entry.")
+
+ # Safe new change log for adding it later to the candidates patch.
+ TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
+
+
+class MakeBranch(Step):
+ MESSAGE = "Create the branch."
+
+ def RunStep(self):
+ self.Git("reset --hard origin/master")
+ self.Git("checkout -b work-branch %s" % self["pending_hash"])
+ self.GitCheckoutFile(CHANGELOG_FILE, self["latest_version"])
+ self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
+
+
+class AddChangeLog(Step):
+ MESSAGE = "Add ChangeLog changes to release branch."
+
+ def RunStep(self):
+ changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
+ old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE))
+ new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
+ TextToFile(new_change_log, os.path.join(self.default_cwd, CHANGELOG_FILE))
+
+
+class SetVersion(Step):
+ MESSAGE = "Set correct version for candidates."
+
+ def RunStep(self):
+ self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
+
+
+class CommitBranch(Step):
+ MESSAGE = "Commit version and changelog to new branch."
+
+ def RunStep(self):
+ # Convert the ChangeLog entry to commit message format.
+ text = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
+
+ # Remove date and trailing white space.
+ text = re.sub(r"^%s: " % self["date"], "", text.rstrip())
+
+ # Remove indentation and merge paragraphs into single long lines, keeping
+ # empty lines between them.
+ def SplitMapJoin(split_text, fun, join_text):
+ return lambda text: join_text.join(map(fun, text.split(split_text)))
+ text = SplitMapJoin(
+ "\n\n", SplitMapJoin("\n", str.strip, " "), "\n\n")(text)
+
+ if not text: # pragma: no cover
+ self.Die("Commit message editing failed.")
+ self["commit_title"] = text.splitlines()[0]
+ TextToFile(text, self.Config("COMMITMSG_FILE"))
+
+ self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
+ os.remove(self.Config("COMMITMSG_FILE"))
+ os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
+
+
+class PushBranch(Step):
+ MESSAGE = "Push changes."
+
+ def RunStep(self):
+ pushspecs = [
+ "refs/heads/work-branch:refs/pending/heads/%s" % self["version"],
+ "%s:refs/pending-tags/heads/%s" %
+ (self["pending_hash"], self["version"]),
+ "%s:refs/heads/%s" % (self["push_hash"], self["version"]),
+ ]
+ cmd = "push origin %s" % " ".join(pushspecs)
+ if self._options.dry_run:
+ print "Dry run. Command:\ngit %s" % cmd
+ else:
+ self.Git(cmd)
+
+
+class TagRevision(Step):
+ MESSAGE = "Tag the new revision."
+
+ def RunStep(self):
+ if self._options.dry_run:
+ print ("Dry run. Tagging \"%s\" with %s" %
+ (self["commit_title"], self["version"]))
+ else:
+ self.vc.Tag(self["version"],
+ "origin/%s" % self["version"],
+ self["commit_title"])
+
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ print("Congratulations, you have successfully created version %s."
+ % self["version"])
+
+ self.GitCheckout("origin/master")
+ self.DeleteBranch("work-branch")
+ self.Git("gc")
+
+
+class CreateRelease(ScriptsBase):
+ def _PrepareOptions(self, parser):
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("-f", "--force",
+ help="Don't prompt the user.",
+ default=True, action="store_true")
+ group.add_argument("-m", "--manual",
+ help="Prompt the user at every important step.",
+ default=False, action="store_true")
+ parser.add_argument("-R", "--revision",
+ help="The git commit ID to push (defaults to HEAD).")
+
+ def _ProcessOptions(self, options): # pragma: no cover
+ if not options.author or not options.reviewer:
+ print "Reviewer (-r) and author (-a) are required."
+ return False
+ return True
+
+ def _Config(self):
+ return {
+ "PERSISTFILE_BASENAME": "/tmp/create-releases-tempfile",
+ "CHANGELOG_ENTRY_FILE":
+ "/tmp/v8-create-releases-tempfile-changelog-entry",
+ "COMMITMSG_FILE": "/tmp/v8-create-releases-tempfile-commitmsg",
+ }
+
+ def _Steps(self):
+ return [
+ Preparation,
+ PrepareBranchRevision,
+ IncrementVersion,
+ DetectLastRelease,
+ PrepareChangeLog,
+ EditChangeLog,
+ MakeBranch,
+ AddChangeLog,
+ SetVersion,
+ CommitBranch,
+ PushBranch,
+ TagRevision,
+ CleanUp,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(CreateRelease().Run())
diff --git a/deps/v8/tools/push-to-trunk/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index 3d2a9ef87d..3d2a9ef87d 100644
--- a/deps/v8/tools/push-to-trunk/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
diff --git a/deps/v8/tools/push-to-trunk/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 9e7f1fb4f5..7aa9fb6ab6 100755
--- a/deps/v8/tools/push-to-trunk/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -47,9 +47,9 @@ class Preparation(Step):
open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
self.InitialEnvironmentChecks(self.default_cwd)
- if self._options.revert_bleeding_edge:
- # FIXME(machenbach): Make revert bleeding_edge obsolete?
- self["merge_to_branch"] = "bleeding_edge"
+ if self._options.revert_master:
+ # FIXME(machenbach): Make revert master obsolete?
+ self["merge_to_branch"] = "master"
elif self._options.branch:
self["merge_to_branch"] = self._options.branch
else: # pragma: no cover
@@ -111,7 +111,7 @@ class CreateCommitMessage(Step):
if not self["revision_list"]: # pragma: no cover
self.Die("Revision list is empty.")
- if self._options.revert and not self._options.revert_bleeding_edge:
+ if self._options.revert and not self._options.revert_master:
action_text = "Rollback of %s"
else:
action_text = "Merged %s"
@@ -156,7 +156,7 @@ class PrepareVersion(Step):
MESSAGE = "Prepare version file."
def RunStep(self):
- if self._options.revert_bleeding_edge:
+ if self._options.revert_master:
return
# This is used to calculate the patch level increment.
self.ReadAndPersistVersion()
@@ -166,7 +166,7 @@ class IncrementVersion(Step):
MESSAGE = "Increment version number."
def RunStep(self):
- if self._options.revert_bleeding_edge:
+ if self._options.revert_master:
return
new_patch = str(int(self["patch"]) + 1)
if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
@@ -192,7 +192,7 @@ class CommitLocal(Step):
def RunStep(self):
# Add a commit message title.
- if self._options.revert and self._options.revert_bleeding_edge:
+ if self._options.revert and self._options.revert_master:
# TODO(machenbach): Find a better convention if multiple patches are
# reverted in one CL.
self["commit_title"] = "Revert on master"
@@ -218,7 +218,7 @@ class TagRevision(Step):
MESSAGE = "Create the tag."
def RunStep(self):
- if self._options.revert_bleeding_edge:
+ if self._options.revert_master:
return
print "Creating tag %s" % self["version"]
self.vc.Tag(self["version"],
@@ -231,7 +231,7 @@ class CleanUp(Step):
def RunStep(self):
self.CommonCleanup()
- if not self._options.revert_bleeding_edge:
+ if not self._options.revert_master:
print "*** SUMMARY ***"
print "version: %s" % self["version"]
print "branch: %s" % self["merge_to_branch"]
@@ -242,13 +242,13 @@ class CleanUp(Step):
class MergeToBranch(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
- "bleeding_edge to other branches, including trunk.")
+ "master to other branches, including candidates.")
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--branch", help="The branch to merge to.")
- group.add_argument("-R", "--revert-bleeding-edge",
- help="Revert specified patches from bleeding edge.",
+ group.add_argument("-R", "--revert-master",
+ help="Revert specified patches from master.",
default=False, action="store_true")
parser.add_argument("revisions", nargs="*",
help="The revisions to merge.")
@@ -264,7 +264,7 @@ class MergeToBranch(ScriptsBase):
help="A patch file to apply as part of the merge.")
def _ProcessOptions(self, options):
- # TODO(machenbach): Add a test that covers revert from bleeding_edge
+ # TODO(machenbach): Add a test that covers revert from master
if len(options.revisions) < 1:
if not options.patch:
print "Either a patch file or revision numbers must be specified"
diff --git a/deps/v8/tools/push-to-trunk/push_to_trunk.py b/deps/v8/tools/release/push_to_candidates.py
index 6e821f2a0b..750794eabd 100755
--- a/deps/v8/tools/push-to-trunk/push_to_trunk.py
+++ b/deps/v8/tools/release/push_to_candidates.py
@@ -35,7 +35,7 @@ import urllib2
from common_includes import *
PUSH_MSG_GIT_SUFFIX = " (based on %s)"
-PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
+
class Preparation(Step):
MESSAGE = "Preparation."
@@ -44,12 +44,12 @@ class Preparation(Step):
self.InitialEnvironmentChecks(self.default_cwd)
self.CommonPrepare()
- if(self["current_branch"] == self.Config("TRUNKBRANCH")
+ if(self["current_branch"] == self.Config("CANDIDATESBRANCH")
or self["current_branch"] == self.Config("BRANCHNAME")):
print "Warning: Script started on branch %s" % self["current_branch"]
self.PrepareBranch()
- self.DeleteBranch(self.Config("TRUNKBRANCH"))
+ self.DeleteBranch(self.Config("CANDIDATESBRANCH"))
class FreshBranch(Step):
@@ -72,92 +72,29 @@ class PreparePushRevision(Step):
self.Die("Could not determine the git hash for the push.")
-class DetectLastPush(Step):
- MESSAGE = "Detect commit ID of last push to trunk."
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
def RunStep(self):
- last_push = self._options.last_push or self.FindLastTrunkPush()
- while True:
- # Print assumed commit, circumventing git's pager.
- print self.GitLog(n=1, git_hash=last_push)
- if self.Confirm("Is the commit printed above the last push to trunk?"):
- break
- last_push = self.FindLastTrunkPush(parent_hash=last_push)
-
- if self._options.last_bleeding_edge:
- # Read the bleeding edge revision of the last push from a command-line
- # option.
- last_push_bleeding_edge = self._options.last_bleeding_edge
- else:
- # Retrieve the bleeding edge revision of the last push from the text in
- # the push commit message.
- last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
- last_push_bleeding_edge = PUSH_MSG_GIT_RE.match(
- last_push_title).group("git_rev")
-
- if not last_push_bleeding_edge: # pragma: no cover
- self.Die("Could not retrieve bleeding edge git hash for trunk push %s"
- % last_push)
-
- # This points to the git hash of the last push on trunk.
- self["last_push_trunk"] = last_push
- # This points to the last bleeding_edge revision that went into the last
- # push.
- # TODO(machenbach): Do we need a check to make sure we're not pushing a
- # revision older than the last push? If we do this, the output of the
- # current change log preparation won't make much sense.
- self["last_push_bleeding_edge"] = last_push_bleeding_edge
-
-
-# TODO(machenbach): Code similarities with bump_up_version.py. Merge after
-# turning this script into a pure git script.
-class GetCurrentBleedingEdgeVersion(Step):
- MESSAGE = "Get latest bleeding edge version."
+ latest_version = self.GetLatestVersion()
- def RunStep(self):
+ # The version file on master can be used to bump up major/minor at
+ # branch time.
self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
+ self.ReadAndPersistVersion("master_")
+ master_version = self.ArrayToVersion("master_")
- # Store latest version.
- self.ReadAndPersistVersion("latest_")
- self["latest_version"] = self.ArrayToVersion("latest_")
- print "Bleeding edge version: %s" % self["latest_version"]
-
-
-class IncrementVersion(Step):
- MESSAGE = "Increment version number."
-
- def RunStep(self):
- # Retrieve current version from last trunk push.
- self.GitCheckoutFile(VERSION_FILE, self["last_push_trunk"])
- self.ReadAndPersistVersion()
- self["trunk_version"] = self.ArrayToVersion("")
-
- if self["latest_build"] == "9999": # pragma: no cover
- # If version control on bleeding edge was switched off, just use the last
- # trunk version.
- self["latest_version"] = self["trunk_version"]
-
- if SortingKey(self["trunk_version"]) < SortingKey(self["latest_version"]):
- # If the version on bleeding_edge is newer than on trunk, use it.
- self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
- self.ReadAndPersistVersion()
-
- if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
- "fire up your EDITOR on %s so you can make arbitrary "
- "changes. When you're done, save the file and exit your "
- "EDITOR.)" % VERSION_FILE)):
-
- text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
- text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
- r"\g<space>%s" % str(int(self["build"]) + 1),
- text)
- TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
- else:
- self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
+ # Use the highest version from master or from tags to determine the new
+ # version.
+ authoritative_version = sorted(
+ [master_version, latest_version], key=SortingKey)[1]
+ self.StoreVersion(authoritative_version, "authoritative_")
# Variables prefixed with 'new_' contain the new version numbers for the
- # ongoing trunk push.
- self.ReadAndPersistVersion("new_")
+ # ongoing candidates push.
+ self["new_major"] = self["authoritative_major"]
+ self["new_minor"] = self["authoritative_minor"]
+ self["new_build"] = str(int(self["authoritative_build"]) + 1)
# Make sure patch level is 0 in a new push.
self["new_patch"] = "0"
@@ -166,6 +103,18 @@ class IncrementVersion(Step):
self["new_minor"],
self["new_build"])
+ print ("Incremented version to %s" % self["version"])
+
+
+class DetectLastRelease(Step):
+ MESSAGE = "Detect commit ID of last release base."
+
+ def RunStep(self):
+ if self._options.last_master:
+ self["last_push_master"] = self._options.last_master
+ else:
+ self["last_push_master"] = self.GetLatestReleaseBase()
+
class PrepareChangeLog(Step):
MESSAGE = "Prepare raw ChangeLog entry."
@@ -193,7 +142,7 @@ class PrepareChangeLog(Step):
output = "%s: Version %s\n\n" % (self["date"], self["version"])
TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
commits = self.GitLog(format="%H",
- git_hash="%s..%s" % (self["last_push_bleeding_edge"],
+ git_hash="%s..%s" % (self["last_push_master"],
self["push_hash"]))
# Cache raw commit messages.
@@ -239,7 +188,7 @@ class EditChangeLog(Step):
if changelog_entry == "": # pragma: no cover
self.Die("Empty ChangeLog entry.")
- # Safe new change log for adding it later to the trunk patch.
+ # Safe new change log for adding it later to the candidates patch.
TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
@@ -286,10 +235,10 @@ class SquashCommits(Step):
class NewBranch(Step):
- MESSAGE = "Create a new branch from trunk."
+ MESSAGE = "Create a new branch from candidates."
def RunStep(self):
- self.GitCreateBranch(self.Config("TRUNKBRANCH"),
+ self.GitCreateBranch(self.Config("CANDIDATESBRANCH"),
self.vc.RemoteCandidateBranch())
@@ -299,16 +248,41 @@ class ApplyChanges(Step):
def RunStep(self):
self.ApplyPatch(self.Config("PATCH_FILE"))
os.remove(self.Config("PATCH_FILE"))
+ # The change log has been modified by the patch. Reset it to the version
+ # on candidates and apply the exact changes determined by this
+ # PrepareChangeLog step above.
+ self.GitCheckoutFile(CHANGELOG_FILE, self.vc.RemoteCandidateBranch())
+ # The version file has been modified by the patch. Reset it to the version
+ # on candidates.
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteCandidateBranch())
+
+
+class CommitSquash(Step):
+ MESSAGE = "Commit to local candidates branch."
+
+ def RunStep(self):
+ # Make a first commit with a slightly different title to not confuse
+ # the tagging.
+ msg = FileToText(self.Config("COMMITMSG_FILE")).splitlines()
+ msg[0] = msg[0].replace("(based on", "(squashed - based on")
+ self.GitCommit(message = "\n".join(msg))
+
+
+class PrepareVersionBranch(Step):
+ MESSAGE = "Prepare new branch to commit version and changelog file."
+
+ def RunStep(self):
+ self.GitCheckout("master")
+ self.Git("fetch")
+ self.GitDeleteBranch(self.Config("CANDIDATESBRANCH"))
+ self.GitCreateBranch(self.Config("CANDIDATESBRANCH"),
+ self.vc.RemoteCandidateBranch())
class AddChangeLog(Step):
- MESSAGE = "Add ChangeLog changes to trunk branch."
+ MESSAGE = "Add ChangeLog changes to candidates branch."
def RunStep(self):
- # The change log has been modified by the patch. Reset it to the version
- # on trunk and apply the exact changes determined by this PrepareChangeLog
- # step above.
- self.GitCheckoutFile(CHANGELOG_FILE, self.vc.RemoteCandidateBranch())
changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE))
new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
@@ -317,17 +291,14 @@ class AddChangeLog(Step):
class SetVersion(Step):
- MESSAGE = "Set correct version for trunk."
+ MESSAGE = "Set correct version for candidates."
def RunStep(self):
- # The version file has been modified by the patch. Reset it to the version
- # on trunk and apply the correct version.
- self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteCandidateBranch())
self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
-class CommitTrunk(Step):
- MESSAGE = "Commit to local trunk branch."
+class CommitCandidate(Step):
+ MESSAGE = "Commit version and changelog to local candidates branch."
def RunStep(self):
self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
@@ -341,7 +312,7 @@ class SanityCheck(Step):
# TODO(machenbach): Run presubmit script here as it is now missing in the
# prepare push process.
if not self.Confirm("Please check if your local checkout is sane: Inspect "
- "%s, compile, run tests. Do you want to commit this new trunk "
+ "%s, compile, run tests. Do you want to commit this new candidates "
"revision to the repository?" % VERSION_FILE):
self.Die("Execution canceled.") # pragma: no cover
@@ -365,16 +336,16 @@ class CleanUp(Step):
MESSAGE = "Done!"
def RunStep(self):
- print("Congratulations, you have successfully created the trunk "
+ print("Congratulations, you have successfully created the candidates "
"revision %s."
% self["version"])
self.CommonCleanup()
- if self.Config("TRUNKBRANCH") != self["current_branch"]:
- self.GitDeleteBranch(self.Config("TRUNKBRANCH"))
+ if self.Config("CANDIDATESBRANCH") != self["current_branch"]:
+ self.GitDeleteBranch(self.Config("CANDIDATESBRANCH"))
-class PushToTrunk(ScriptsBase):
+class PushToCandidates(ScriptsBase):
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group()
group.add_argument("-f", "--force",
@@ -383,12 +354,12 @@ class PushToTrunk(ScriptsBase):
group.add_argument("-m", "--manual",
help="Prompt the user at every important step.",
default=False, action="store_true")
- parser.add_argument("-b", "--last-bleeding-edge",
- help=("The git commit ID of the last bleeding edge "
- "revision that was pushed to trunk. This is "
- "used for the auto-generated ChangeLog entry."))
+ parser.add_argument("-b", "--last-master",
+ help=("The git commit ID of the last master "
+ "revision that was pushed to candidates. This is"
+ " used for the auto-generated ChangeLog entry."))
parser.add_argument("-l", "--last-push",
- help="The git commit ID of the last push to trunk.")
+ help="The git commit ID of the last candidates push.")
parser.add_argument("-R", "--revision",
help="The git commit ID to push (defaults to HEAD).")
@@ -406,11 +377,12 @@ class PushToTrunk(ScriptsBase):
def _Config(self):
return {
"BRANCHNAME": "prepare-push",
- "TRUNKBRANCH": "trunk-push",
- "PERSISTFILE_BASENAME": "/tmp/v8-push-to-trunk-tempfile",
- "CHANGELOG_ENTRY_FILE": "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
- "PATCH_FILE": "/tmp/v8-push-to-trunk-tempfile-patch-file",
- "COMMITMSG_FILE": "/tmp/v8-push-to-trunk-tempfile-commitmsg",
+ "CANDIDATESBRANCH": "candidates-push",
+ "PERSISTFILE_BASENAME": "/tmp/v8-push-to-candidates-tempfile",
+ "CHANGELOG_ENTRY_FILE":
+ "/tmp/v8-push-to-candidates-tempfile-changelog-entry",
+ "PATCH_FILE": "/tmp/v8-push-to-candidates-tempfile-patch-file",
+ "COMMITMSG_FILE": "/tmp/v8-push-to-candidates-tempfile-commitmsg",
}
def _Steps(self):
@@ -418,19 +390,21 @@ class PushToTrunk(ScriptsBase):
Preparation,
FreshBranch,
PreparePushRevision,
- DetectLastPush,
- GetCurrentBleedingEdgeVersion,
IncrementVersion,
+ DetectLastRelease,
PrepareChangeLog,
EditChangeLog,
StragglerCommits,
SquashCommits,
NewBranch,
ApplyChanges,
+ CommitSquash,
+ SanityCheck,
+ Land,
+ PrepareVersionBranch,
AddChangeLog,
SetVersion,
- CommitTrunk,
- SanityCheck,
+ CommitCandidate,
Land,
TagRevision,
CleanUp,
@@ -438,4 +412,4 @@ class PushToTrunk(ScriptsBase):
if __name__ == "__main__": # pragma: no cover
- sys.exit(PushToTrunk().Run())
+ sys.exit(PushToCandidates().Run())
diff --git a/deps/v8/tools/push-to-trunk/releases.py b/deps/v8/tools/release/releases.py
index 1a5b15ca82..0f35e7c88f 100755
--- a/deps/v8/tools/push-to-trunk/releases.py
+++ b/deps/v8/tools/release/releases.py
@@ -3,7 +3,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# This script retrieves the history of all V8 branches and trunk revisions and
+# This script retrieves the history of all V8 branches and
# their corresponding Chromium revisions.
# Requires a chromium checkout with branch heads:
@@ -70,7 +70,7 @@ def FilterDuplicatesAndReverse(cr_releases):
"""Returns the chromium releases in reverse order filtered by v8 revision
duplicates.
- cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
+ cr_releases is a list of [cr_rev, v8_hsh] reverse-sorted by cr_rev.
"""
last = ""
result = []
@@ -87,8 +87,9 @@ def BuildRevisionRanges(cr_releases):
The ranges are comma-separated, each range has the form R1:R2. The newest
entry is the only one of the form R1, as there is no end range.
- cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
- cr_rev either refers to a chromium svn revision or a chromium branch number.
+ cr_releases is a list of [cr_rev, v8_hsh] reverse-sorted by cr_rev.
+ cr_rev either refers to a chromium commit position or a chromium branch
+ number.
"""
range_lists = {}
cr_releases = FilterDuplicatesAndReverse(cr_releases)
@@ -100,7 +101,6 @@ def BuildRevisionRanges(cr_releases):
# Assume the chromium revisions are all different.
assert cr_from[0] != cr_to[0]
- # TODO(machenbach): Subtraction is not git friendly.
ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1)
# Collect the ranges in lists per revision.
@@ -111,7 +111,7 @@ def BuildRevisionRanges(cr_releases):
range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0])
# Stringify and comma-separate the range lists.
- return dict((rev, ", ".join(ran)) for rev, ran in range_lists.iteritems())
+ return dict((hsh, ", ".join(ran)) for hsh, ran in range_lists.iteritems())
def MatchSafe(match):
@@ -136,7 +136,7 @@ class RetrieveV8Releases(Step):
return (self._options.max_releases > 0
and len(releases) > self._options.max_releases)
- def GetBleedingEdgeGitFromPush(self, title):
+ def GetMasterHashFromPush(self, title):
return MatchSafe(PUSH_MSG_GIT_RE.match(title))
def GetMergedPatches(self, body):
@@ -161,7 +161,7 @@ class RetrieveV8Releases(Step):
def GetReleaseDict(
- self, git_hash, bleeding_edge_rev, bleeding_edge_git, branch, version,
+ self, git_hash, master_position, master_hash, branch, version,
patches, cl_body):
revision = self.GetCommitPositionNumber(git_hash)
return {
@@ -170,9 +170,9 @@ class RetrieveV8Releases(Step):
# The git revision on the branch.
"revision_git": git_hash,
# The cr commit position number on master.
- "bleeding_edge": bleeding_edge_rev,
+ "master_position": master_position,
# The same for git.
- "bleeding_edge_git": bleeding_edge_git,
+ "master_hash": master_hash,
# The branch name.
"branch": branch,
# The version for displaying in the form 3.26.3 or 3.26.3.12.
@@ -185,8 +185,8 @@ class RetrieveV8Releases(Step):
"chromium_revision": "",
# Default for easier output formatting.
"chromium_branch": "",
- # Link to the CL on code review. Trunk pushes are not uploaded, so this
- # field will be populated below with the recent roll CL link.
+ # Link to the CL on code review. Candiates pushes are not uploaded,
+ # so this field will be populated below with the recent roll CL link.
"review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
# Link to the commit message on google code.
"revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
@@ -207,31 +207,20 @@ class RetrieveV8Releases(Step):
else:
patches = self.GetMergedPatches(body)
- title = self.GitLog(n=1, format="%s", git_hash=git_hash)
- bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
- bleeding_edge_position = ""
- if bleeding_edge_git:
- bleeding_edge_position = self.GetCommitPositionNumber(bleeding_edge_git)
- # TODO(machenbach): Add the commit position number.
+ if SortingKey("4.2.69") <= SortingKey(version):
+ master_hash = self.GetLatestReleaseBase(version=version)
+ else:
+ # Legacy: Before version 4.2.69, the master revision was determined
+ # by commit message.
+ title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+ master_hash = self.GetMasterHashFromPush(title)
+ master_position = ""
+ if master_hash:
+ master_position = self.GetCommitPositionNumber(master_hash)
return self.GetReleaseDict(
- git_hash, bleeding_edge_position, bleeding_edge_git, branch, version,
+ git_hash, master_position, master_hash, branch, version,
patches, body), self["patch"]
- def GetReleasesFromMaster(self):
- # TODO(machenbach): Implement this in git as soon as we tag again on
- # master.
- # tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v
- # --limit 20")
- # releases = []
- # for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
- # git_hash = self.vc.SvnGit(revision)
-
- # Add bleeding edge release. It does not contain patches or a code
- # review link, as tags are not uploaded.
- # releases.append(self.GetReleaseDict(
- # git_hash, revision, git_hash, self.vc.MasterBranch(), tag, "", ""))
- return []
-
def GetReleasesFromBranch(self, branch):
self.GitReset(self.vc.RemoteBranch(branch))
if branch == self.vc.MasterBranch():
@@ -265,28 +254,58 @@ class RetrieveV8Releases(Step):
self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
return releases
+ def GetReleaseFromRevision(self, revision):
+ releases = []
+ try:
+ if (VERSION_FILE not in self.GitChangedFiles(revision) or
+ not self.GitCheckoutFileSafe(VERSION_FILE, revision)):
+ print "Skipping revision %s" % revision
+ return [] # pragma: no cover
+
+ branches = map(
+ str.strip,
+ self.Git("branch -r --contains %s" % revision).strip().splitlines(),
+ )
+ branch = ""
+ for b in branches:
+ if b.startswith("origin/"):
+ branch = b.split("origin/")[1]
+ break
+ if b.startswith("branch-heads/"):
+ branch = b.split("branch-heads/")[1]
+ break
+ else:
+ print "Could not determine branch for %s" % revision
+
+ release, _ = self.GetRelease(revision, branch)
+ releases.append(release)
+
+ # Allow Ctrl-C interrupt.
+ except (KeyboardInterrupt, SystemExit): # pragma: no cover
+ pass
+
+ # Clean up checked-out version file.
+ self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
+ return releases
+
+
def RunStep(self):
self.GitCreateBranch(self._config["BRANCHNAME"])
- branches = self.vc.GetBranches()
releases = []
if self._options.branch == 'recent':
- # Get only recent development on trunk, beta and stable.
- if self._options.max_releases == 0: # pragma: no cover
- self._options.max_releases = 10
- beta, stable = SortBranches(branches)[0:2]
- releases += self.GetReleasesFromBranch(stable)
- releases += self.GetReleasesFromBranch(beta)
- releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
- releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
+ # List every release from the last 7 days.
+ revisions = self.GetRecentReleases(max_age=7 * 24 * 60 * 60)
+ for revision in revisions:
+ releases += self.GetReleaseFromRevision(revision)
elif self._options.branch == 'all': # pragma: no cover
# Retrieve the full release history.
- for branch in branches:
+ for branch in self.vc.GetBranches():
releases += self.GetReleasesFromBranch(branch)
releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
else: # pragma: no cover
# Retrieve history for a specified branch.
- assert self._options.branch in (branches +
+ assert self._options.branch in (self.vc.GetBranches() +
[self.vc.CandidateBranch(), self.vc.MasterBranch()])
releases += self.GetReleasesFromBranch(self._options.branch)
@@ -331,18 +350,12 @@ class RetrieveChromiumV8Releases(Step):
def RunStep(self):
cwd = self._options.chromium
- releases = filter(
- lambda r: r["branch"] in [self.vc.CandidateBranch(),
- self.vc.MasterBranch()],
- self["releases"])
- if not releases: # pragma: no cover
- print "No releases detected. Skipping chromium history."
- return True
# Update v8 checkout in chromium.
self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))
- oldest_v8_rev = int(releases[-1]["revision"])
+ # All v8 revisions we are interested in.
+ releases_dict = dict((r["revision_git"], r) for r in self["releases"])
cr_releases = []
try:
@@ -357,13 +370,12 @@ class RetrieveChromiumV8Releases(Step):
if match:
cr_rev = self.GetCommitPositionNumber(git_hash, cwd=cwd)
if cr_rev:
- v8_rev = ConvertToCommitNumber(self, match.group(1))
- cr_releases.append([cr_rev, v8_rev])
+ v8_hsh = match.group(1)
+ cr_releases.append([cr_rev, v8_hsh])
- # Stop after reaching beyond the last v8 revision we want to update.
- # We need a small buffer for possible revert/reland frenzies.
- # TODO(machenbach): Subtraction is not git friendly.
- if int(v8_rev) < oldest_v8_rev - 100:
+ # Stop as soon as we find a v8 revision that we didn't fetch in the
+ # v8-revision-retrieval part above (i.e. a revision that's too old).
+ if v8_hsh not in releases_dict:
break # pragma: no cover
# Allow Ctrl-C interrupt.
@@ -373,11 +385,11 @@ class RetrieveChromiumV8Releases(Step):
# Clean up.
self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
- # Add the chromium ranges to the v8 trunk and bleeding_edge releases.
+ # Add the chromium ranges to the v8 candidates and master releases.
all_ranges = BuildRevisionRanges(cr_releases)
- releases_dict = dict((r["revision"], r) for r in releases)
- for revision, ranges in all_ranges.iteritems():
- releases_dict.get(revision, {})["chromium_revision"] = ranges
+
+ for hsh, ranges in all_ranges.iteritems():
+ releases_dict.get(hsh, {})["chromium_revision"] = ranges
# TODO(machenbach): Unify common code with method above.
@@ -386,13 +398,9 @@ class RietrieveChromiumBranches(Step):
def RunStep(self):
cwd = self._options.chromium
- trunk_releases = filter(lambda r: r["branch"] == self.vc.CandidateBranch(),
- self["releases"])
- if not trunk_releases: # pragma: no cover
- print "No trunk releases detected. Skipping chromium history."
- return True
- oldest_v8_rev = int(trunk_releases[-1]["revision"])
+ # All v8 revisions we are interested in.
+ releases_dict = dict((r["revision_git"], r) for r in self["releases"])
# Filter out irrelevant branches.
branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
@@ -414,13 +422,12 @@ class RietrieveChromiumBranches(Step):
deps = FileToText(os.path.join(cwd, "DEPS"))
match = DEPS_RE.search(deps)
if match:
- v8_rev = ConvertToCommitNumber(self, match.group(1))
- cr_branches.append([str(branch), v8_rev])
+ v8_hsh = match.group(1)
+ cr_branches.append([str(branch), v8_hsh])
- # Stop after reaching beyond the last v8 revision we want to update.
- # We need a small buffer for possible revert/reland frenzies.
- # TODO(machenbach): Subtraction is not git friendly.
- if int(v8_rev) < oldest_v8_rev - 100:
+ # Stop as soon as we find a v8 revision that we didn't fetch in the
+ # v8-revision-retrieval part above (i.e. a revision that's too old).
+ if v8_hsh not in releases_dict:
break # pragma: no cover
# Allow Ctrl-C interrupt.
@@ -430,11 +437,10 @@ class RietrieveChromiumBranches(Step):
# Clean up.
self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
- # Add the chromium branches to the v8 trunk releases.
+ # Add the chromium branches to the v8 candidate releases.
all_ranges = BuildRevisionRanges(cr_branches)
- trunk_dict = dict((r["revision"], r) for r in trunk_releases)
for revision, ranges in all_ranges.iteritems():
- trunk_dict.get(revision, {})["chromium_branch"] = ranges
+ releases_dict.get(revision, {})["chromium_branch"] = ranges
class CleanUp(Step):
@@ -471,7 +477,8 @@ class Releases(ScriptsBase):
parser.add_argument("-b", "--branch", default="recent",
help=("The branch to analyze. If 'all' is specified, "
"analyze all branches. If 'recent' (default) "
- "is specified, track beta, stable and trunk."))
+ "is specified, track beta, stable and "
+ "candidates."))
parser.add_argument("-c", "--chromium",
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
diff --git a/deps/v8/tools/push-to-trunk/script_test.py b/deps/v8/tools/release/script_test.py
index cbb2134f6d..cbb2134f6d 100755
--- a/deps/v8/tools/push-to-trunk/script_test.py
+++ b/deps/v8/tools/release/script_test.py
diff --git a/deps/v8/tools/push-to-trunk/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index db702a3ae1..3beddfd936 100644
--- a/deps/v8/tools/push-to-trunk/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -33,34 +33,33 @@ import traceback
import unittest
import auto_push
-from auto_push import CheckLastPush
+from auto_push import LastReleaseBailout
import auto_roll
import common_includes
from common_includes import *
+import create_release
+from create_release import CreateRelease
import merge_to_branch
from merge_to_branch import *
-import push_to_trunk
-from push_to_trunk import *
+import push_to_candidates
+from push_to_candidates import *
import chromium_roll
from chromium_roll import ChromiumRoll
import releases
from releases import Releases
-import bump_up_version
-from bump_up_version import BumpUpVersion
-from bump_up_version import LastChangeBailout
-from bump_up_version import LKGRVersionUpToDateBailout
from auto_tag import AutoTag
TEST_CONFIG = {
"DEFAULT_CWD": None,
"BRANCHNAME": "test-prepare-push",
- "TRUNKBRANCH": "test-trunk-push",
- "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-trunk-tempfile",
- "CHANGELOG_ENTRY_FILE": "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
- "PATCH_FILE": "/tmp/test-v8-push-to-trunk-tempfile-patch",
- "COMMITMSG_FILE": "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
- "CHROMIUM": "/tmp/test-v8-push-to-trunk-tempfile-chromium",
+ "CANDIDATESBRANCH": "test-candidates-push",
+ "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-candidates-tempfile",
+ "CHANGELOG_ENTRY_FILE":
+ "/tmp/test-v8-push-to-candidates-tempfile-changelog-entry",
+ "PATCH_FILE": "/tmp/test-v8-push-to-candidates-tempfile-patch",
+ "COMMITMSG_FILE": "/tmp/test-v8-push-to-candidates-tempfile-commitmsg",
+ "CHROMIUM": "/tmp/test-v8-push-to-candidates-tempfile-chromium",
"SETTINGS_LOCATION": None,
"ALREADY_MERGING_SENTINEL_FILE":
"/tmp/test-merge-to-branch-tempfile-already-merging",
@@ -355,14 +354,14 @@ class ScriptTest(unittest.TestCase):
return name
- def WriteFakeVersionFile(self, minor=22, build=4, patch=0):
+ def WriteFakeVersionFile(self, major=3, minor=22, build=4, patch=0):
version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
if not os.path.exists(os.path.dirname(version_file)):
os.makedirs(os.path.dirname(version_file))
with open(version_file, "w") as f:
f.write(" // Some line...\n")
f.write("\n")
- f.write("#define MAJOR_VERSION 3\n")
+ f.write("#define MAJOR_VERSION %s\n" % major)
f.write("#define MINOR_VERSION %s\n" % minor)
f.write("#define BUILD_NUMBER %s\n" % build)
f.write("#define PATCH_LEVEL %s\n" % patch)
@@ -376,7 +375,7 @@ class ScriptTest(unittest.TestCase):
config=TEST_CONFIG, side_effect_handler=self,
options=options)
- def RunStep(self, script=PushToTrunk, step_class=Step, args=None):
+ def RunStep(self, script=PushToCandidates, step_class=Step, args=None):
"""Convenience wrapper."""
args = args if args is not None else ["-m"]
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
@@ -410,7 +409,7 @@ class ScriptTest(unittest.TestCase):
return "1999-07-31"
def GetUTCStamp(self):
- return "100000"
+ return "1000000"
def Expect(self, *args):
"""Convenience wrapper."""
@@ -540,7 +539,7 @@ class ScriptTest(unittest.TestCase):
Cmd("git log -1 --format=%H HEAD", "push_hash")
])
- self.RunStep(PushToTrunk, PreparePushRevision)
+ self.RunStep(PushToCandidates, PreparePushRevision)
self.assertEquals("push_hash", self._state["push_hash"])
def testPrepareChangeLog(self):
@@ -567,10 +566,10 @@ class ScriptTest(unittest.TestCase):
Cmd("git log -1 --format=%an rev4", "author4@chromium.org"),
])
- self._state["last_push_bleeding_edge"] = "1234"
+ self._state["last_push_master"] = "1234"
self._state["push_hash"] = "push_hash"
self._state["version"] = "3.22.5"
- self.RunStep(PushToTrunk, PrepareChangeLog)
+ self.RunStep(PushToCandidates, PrepareChangeLog)
actual_cl = FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
@@ -611,27 +610,30 @@ class ScriptTest(unittest.TestCase):
Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""),
])
- self.RunStep(PushToTrunk, EditChangeLog)
+ self.RunStep(PushToCandidates, EditChangeLog)
self.assertEquals("New\n Lines",
FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"]))
- # Version on trunk: 3.22.4.0. Version on master (bleeding_edge): 3.22.6.
- # Make sure that the increment is 3.22.7.0.
- def testIncrementVersion(self):
- self.WriteFakeVersionFile()
- self._state["last_push_trunk"] = "hash1"
- self._state["latest_build"] = "6"
- self._state["latest_version"] = "3.22.6.0"
+ TAGS = """
+4425.0
+0.0.0.0
+3.9.6
+3.22.4
+test_tag
+"""
+ # Version as tag: 3.22.4.0. Version on master: 3.22.6.
+ # Make sure that the latest version is 3.22.6.0.
+ def testIncrementVersion(self):
self.Expect([
- Cmd("git checkout -f hash1 -- src/version.cc", ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
Cmd("git checkout -f origin/master -- src/version.cc",
- "", cb=lambda: self.WriteFakeVersionFile(22, 6)),
- RL("Y"), # Increment build number.
+ "", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
])
- self.RunStep(PushToTrunk, IncrementVersion)
+ self.RunStep(PushToCandidates, IncrementVersion)
self.assertEquals("3", self._state["new_major"])
self.assertEquals("22", self._state["new_minor"])
@@ -650,7 +652,7 @@ class ScriptTest(unittest.TestCase):
self._state["push_hash"] = "hash1"
self._state["date"] = "1999-11-11"
- self.RunStep(PushToTrunk, SquashCommits)
+ self.RunStep(PushToCandidates, SquashCommits)
self.assertEquals(FileToText(TEST_CONFIG["COMMITMSG_FILE"]), expected_msg)
patch = FileToText(TEST_CONFIG["PATCH_FILE"])
@@ -704,40 +706,48 @@ Performance and stability improvements on all platforms."""
])
FakeScript(fake_config, self).Run(["--work-dir", work_dir])
- def _PushToTrunk(self, force=False, manual=False):
+ def _PushToCandidates(self, force=False, manual=False):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- # The version file on bleeding edge has build level 5, while the version
- # file from trunk has build level 4.
+ # The version file on master has build level 5, while the version
+ # file from candidates has build level 4.
self.WriteFakeVersionFile(build=5)
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
- bleeding_edge_change_log = "2014-03-17: Sentinel\n"
- TextToFile(bleeding_edge_change_log,
+ master_change_log = "2014-03-17: Sentinel\n"
+ TextToFile(master_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
os.environ["EDITOR"] = "vi"
+ commit_msg_squashed = """Version 3.22.5 (squashed - based on push_hash)
+
+Log text 1 (issue 321).
+
+Performance and stability improvements on all platforms."""
+
+ commit_msg = """Version 3.22.5 (based on push_hash)
+
+Log text 1 (issue 321).
+
+Performance and stability improvements on all platforms."""
+
def ResetChangeLog():
- """On 'git co -b new_branch svn/trunk', and 'git checkout -- ChangeLog',
- the ChangLog will be reset to its content on trunk."""
- trunk_change_log = """1999-04-05: Version 3.22.4
+ """On 'git co -b new_branch origin/candidates',
+ and 'git checkout -- ChangeLog',
+ the ChangLog will be reset to its content on candidates."""
+ candidates_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
- TextToFile(trunk_change_log,
+ TextToFile(candidates_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
- def ResetToTrunk():
+ def ResetToCandidates():
ResetChangeLog()
self.WriteFakeVersionFile()
- def CheckSVNCommit():
+ def CheckVersionCommit():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
- self.assertEquals(
-"""Version 3.22.5 (based on push_hash)
-
-Log text 1 (issue 321).
-
-Performance and stability improvements on all platforms.""", commit)
+ self.assertEquals(commit_msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
@@ -746,7 +756,8 @@ Performance and stability improvements on all platforms.""", commit)
self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
- # Check that the change log on the trunk branch got correctly modified.
+ # Check that the change log on the candidates branch got correctly
+ # modified.
change_log = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertEquals(
@@ -773,26 +784,14 @@ Performance and stability improvements on all platforms.""", commit)
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd(("git new-branch %s --upstream origin/master" %
- TEST_CONFIG["BRANCHNAME"]),
- ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
- "origin/candidates"), "hash2\n"),
- Cmd("git log -1 hash2", "Log message\n"),
- ]
- if manual:
- expectations.append(RL("Y")) # Confirm last push.
- expectations += [
- Cmd("git log -1 --format=%s hash2",
- "Version 3.4.5 (based on abc3)\n"),
+ TEST_CONFIG["BRANCHNAME"]), ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
Cmd("git checkout -f origin/master -- src/version.cc",
"", cb=self.WriteFakeVersionFile),
- Cmd("git checkout -f hash2 -- src/version.cc", "",
- cb=self.WriteFakeVersionFile),
- ]
- if manual:
- expectations.append(RL("")) # Increment build number.
- expectations += [
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash",
+ "Version 3.22.4 (based on abc3)\n"),
Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
@@ -808,19 +807,26 @@ Performance and stability improvements on all platforms.""", commit)
Cmd("git checkout -f origin/master", ""),
Cmd("git diff origin/candidates push_hash", "patch content\n"),
Cmd(("git new-branch %s --upstream origin/candidates" %
- TEST_CONFIG["TRUNKBRANCH"]), "", cb=ResetToTrunk),
+ TEST_CONFIG["CANDIDATESBRANCH"]), "", cb=ResetToCandidates),
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
Cmd("git checkout -f origin/candidates -- ChangeLog", "",
cb=ResetChangeLog),
Cmd("git checkout -f origin/candidates -- src/version.cc", "",
cb=self.WriteFakeVersionFile),
- Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
- cb=CheckSVNCommit),
+ Cmd("git commit -am \"%s\"" % commit_msg_squashed, ""),
]
if manual:
expectations.append(RL("Y")) # Sanity check.
expectations += [
Cmd("git cl land -f --bypass-hooks", ""),
+ Cmd("git checkout -f master", ""),
+ Cmd("git fetch", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["CANDIDATESBRANCH"], ""),
+ Cmd(("git new-branch %s --upstream origin/candidates" %
+ TEST_CONFIG["CANDIDATESBRANCH"]), "", cb=ResetToCandidates),
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
+ cb=CheckVersionCommit),
+ Cmd("git cl land -f --bypass-hooks", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
"\"Version 3.22.5 (based on push_hash)\""
@@ -829,7 +835,7 @@ Performance and stability improvements on all platforms.""", commit)
Cmd("git push origin 3.22.5", ""),
Cmd("git checkout -f some_branch", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
- Cmd("git branch -D %s" % TEST_CONFIG["TRUNKBRANCH"], ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["CANDIDATESBRANCH"], ""),
]
self.Expect(expectations)
@@ -837,7 +843,7 @@ Performance and stability improvements on all platforms.""", commit)
if force: args.append("-f")
if manual: args.append("-m")
else: args += ["-r", "reviewer@chromium.org"]
- PushToTrunk(TEST_CONFIG, self).Run(args)
+ PushToCandidates(TEST_CONFIG, self).Run(args)
cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
@@ -845,17 +851,125 @@ Performance and stability improvements on all platforms.""", commit)
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
# Note: The version file is on build number 5 again in the end of this test
- # since the git command that merges to the bleeding edge branch is mocked
- # out.
+ # since the git command that merges to master is mocked out.
+
+ def testPushToCandidatesManual(self):
+ self._PushToCandidates(manual=True)
+
+ def testPushToCandidatesSemiAutomatic(self):
+ self._PushToCandidates()
+
+ def testPushToCandidatesForced(self):
+ self._PushToCandidates(force=True)
+
+ def testCreateRelease(self):
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+
+ # The version file on master has build level 5.
+ self.WriteFakeVersionFile(build=5)
+
+ master_change_log = "2014-03-17: Sentinel\n"
+ TextToFile(master_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
+
+ commit_msg = """Version 3.22.5
+
+Log text 1 (issue 321).
+
+Performance and stability improvements on all platforms."""
+
+ def ResetChangeLog():
+ last_change_log = """1999-04-05: Version 3.22.4
+
+ Performance and stability improvements on all platforms.\n"""
+ TextToFile(last_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
+
+
+ def CheckVersionCommit():
+ commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
+ self.assertEquals(commit_msg, commit)
+ version = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
+ self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
+ self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
+ self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
+ self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+
+ # Check that the change log on the candidates branch got correctly
+ # modified.
+ change_log = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
+ self.assertEquals(
+"""1999-07-31: Version 3.22.5
+
+ Log text 1 (issue 321).
+
+ Performance and stability improvements on all platforms.
+
+
+1999-04-05: Version 3.22.4
+
+ Performance and stability improvements on all platforms.\n""",
+ change_log)
+
+ expectations = [
+ Cmd("git fetch origin "
+ "+refs/heads/*:refs/heads/* "
+ "+refs/pending/*:refs/pending/* "
+ "+refs/pending-tags/*:refs/pending-tags/*", ""),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git branch", ""),
+ Cmd("git log -1 --format=\"%H %T\" push_hash", "push_hash tree_hash"),
+ Cmd("git log -200 --format=\"%H %T\" refs/pending/heads/master",
+ "not_right wrong\npending_hash tree_hash\nsome other\n"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git checkout -f origin/master -- src/version.cc",
+ "", cb=self.WriteFakeVersionFile),
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
+ Cmd("git log -1 --format=%H release_hash^", "abc3\n"),
+ Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
+ Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
+ Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
+ Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
+ Cmd("git reset --hard origin/master", ""),
+ Cmd("git checkout -b work-branch pending_hash", ""),
+ Cmd("git checkout -f 3.22.4 -- ChangeLog", "", cb=ResetChangeLog),
+ Cmd("git checkout -f 3.22.4 -- src/version.cc", "",
+ cb=self.WriteFakeVersionFile),
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
+ cb=CheckVersionCommit),
+ Cmd("git push origin "
+ "refs/heads/work-branch:refs/pending/heads/3.22.5 "
+ "pending_hash:refs/pending-tags/heads/3.22.5 "
+ "push_hash:refs/heads/3.22.5", ""),
+ Cmd("git fetch", ""),
+ Cmd("git log -1 --format=%H --grep="
+ "\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
+ Cmd("git tag 3.22.5 hsh_to_tag", ""),
+ Cmd("git push origin 3.22.5", ""),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git branch", "* master\n work-branch\n"),
+ Cmd("git branch -D work-branch", ""),
+ Cmd("git gc", ""),
+ ]
+ self.Expect(expectations)
- def testPushToTrunkManual(self):
- self._PushToTrunk(manual=True)
+ args = ["-a", "author@chromium.org",
+ "-r", "reviewer@chromium.org",
+ "--revision", "push_hash"]
+ CreateRelease(TEST_CONFIG, self).Run(args)
- def testPushToTrunkSemiAutomatic(self):
- self._PushToTrunk()
+ cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
+ self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
+ self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
+ self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
- def testPushToTrunkForced(self):
- self._PushToTrunk(force=True)
+ # Note: The version file is on build number 5 again in the end of this test
+ # since the git command that merges to master is mocked out.
C_V8_22624_LOG = """V8 CL.
@@ -899,11 +1013,19 @@ def get_list():
expectations = [
Cmd("git fetch origin", ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "origin/candidates"), "push_hash\n"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
+ Cmd("git log -1 --format=%s push_hash",
+ "Version 3.22.4 (based on abc)\n"),
+ Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
Cmd("git log -1 --format=%s push_hash",
- "Version 3.22.5 (based on bleeding_edge revision r22622)\n"),
+ "Version 3.22.4 (based on abc)"),
+ Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
+ Cmd("git log -1 --format=%H 3.22.2", "last_roll_base_hash"),
+ Cmd("git log -1 --format=%s last_roll_base_hash", "Version 3.22.2"),
+ Cmd("git log -1 --format=%H last_roll_base_hash^",
+ "last_roll_master_hash"),
URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
"document.write('g_name')"),
Cmd("git status -s -uno", "", cwd=chrome_dir),
@@ -913,8 +1035,10 @@ def get_list():
Cmd("git fetch origin", ""),
Cmd("git new-branch v8-roll-push_hash", "", cwd=chrome_dir),
Cmd("roll-dep v8 push_hash", "rolled", cb=WriteDeps, cwd=chrome_dir),
- Cmd(("git commit -am \"Update V8 to version 3.22.5 "
- "(based on bleeding_edge revision r22622).\n\n"
+ Cmd(("git commit -am \"Update V8 to version 3.22.4 "
+ "(based on abc).\n\n"
+ "Summary of changes available at:\n"
+ "https://chromium.googlesource.com/v8/v8/+log/last_rol..abc\n\n"
"Please reply to the V8 sheriff c_name@chromium.org in "
"case of problems.\n\nTBR=c_name@chromium.org\" "
"--author \"author@chromium.org <author@chromium.org>\""),
@@ -926,7 +1050,8 @@ def get_list():
args = ["-a", "author@chromium.org", "-c", chrome_dir,
"--sheriff", "--googlers-mapping", googlers_mapping_py,
- "-r", "reviewer@chromium.org"]
+ "-r", "reviewer@chromium.org",
+ "--last-roll", "last_roll_hsh"]
ChromiumRoll(TEST_CONFIG, self).Run(args)
deps = FileToText(os.path.join(chrome_dir, "DEPS"))
@@ -934,34 +1059,33 @@ def get_list():
def testCheckLastPushRecently(self):
self.Expect([
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
- "origin/candidates"), "hash2\n"),
- Cmd("git log -1 --format=%s hash2",
- "Version 3.4.5 (based on abc123)\n"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash",
+ "Version 3.22.4 (based on abc3)\n"),
+ Cmd("git log --format=%H abc3..abc123", "\n"),
])
self._state["candidate"] = "abc123"
self.assertEquals(0, self.RunStep(
- auto_push.AutoPush, CheckLastPush, AUTO_PUSH_ARGS))
+ auto_push.AutoPush, LastReleaseBailout, AUTO_PUSH_ARGS))
def testAutoPush(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
self.Expect([
Cmd("git status -s -uno", ""),
Cmd("git status -s -b -uno", "## some_branch\n"),
Cmd("git fetch", ""),
- URL("https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is throttled\"}"),
- Cmd("git fetch origin +refs/heads/candidate:refs/heads/candidate", ""),
- Cmd("git show-ref -s refs/heads/candidate", "abc123\n"),
- Cmd(("git log -1 --format=%H --grep=\""
- "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\""
- " origin/candidates"), "push_hash\n"),
- Cmd("git log -1 --format=%s push_hash",
- "Version 3.4.5 (based on abc101)\n"),
+ Cmd("git fetch origin +refs/heads/roll:refs/heads/roll", ""),
+ Cmd("git show-ref -s refs/heads/roll", "abc123\n"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash",
+ "Version 3.22.4 (based on abc3)\n"),
+ Cmd("git log --format=%H abc3..abc123", "some_stuff\n"),
])
auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
@@ -971,38 +1095,6 @@ def get_list():
self.assertEquals("abc123", state["candidate"])
- def testAutoPushStoppedBySettings(self):
- TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- TEST_CONFIG["SETTINGS_LOCATION"] = self.MakeEmptyTempFile()
- TextToFile("{\"enable_auto_push\": false}",
- TEST_CONFIG["SETTINGS_LOCATION"])
-
- self.Expect([
- Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
- Cmd("git fetch", ""),
- ])
-
- def RunAutoPush():
- auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
- self.assertRaises(Exception, RunAutoPush)
-
- def testAutoPushStoppedByTreeStatus(self):
- TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
-
- self.Expect([
- Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
- Cmd("git fetch", ""),
- URL("https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is throttled (no push)\"}"),
- ])
-
- def RunAutoPush():
- auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
- self.assertRaises(Exception, RunAutoPush)
-
def testAutoRollExistingRoll(self):
self.Expect([
URL("https://codereview.chromium.org/search",
@@ -1034,12 +1126,9 @@ deps = {
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch", ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "origin/candidates"), "push_hash\n"),
- Cmd("git log -1 --format=%B push_hash", self.C_V8_22624_LOG),
- Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
@@ -1056,12 +1145,9 @@ deps = {
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch", ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "origin/candidates"), "push_hash\n"),
- Cmd("git log -1 --format=%B push_hash", self.C_V8_123456_LOG),
- Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
@@ -1101,7 +1187,7 @@ BUG=123,234,345,456,567,v8:123
LOG=N
"""
- def VerifySVNCommit():
+ def VerifyLand():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
self.assertEquals(msg, commit)
version = FileToText(
@@ -1116,7 +1202,7 @@ LOG=N
Cmd("git status -s -b -uno", "## some_branch\n"),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
- Cmd("git new-branch %s --upstream origin/candidates" %
+ Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
TEST_CONFIG["BRANCHNAME"], ""),
Cmd(("git log --format=%H --grep=\"Port ab12345\" "
"--reverse origin/master"),
@@ -1174,16 +1260,16 @@ LOG=N
RL("LGTM"), # Enter LGTM for V8 CL.
Cmd("git cl presubmit", "Presubmit successfull\n"),
Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
- cb=VerifySVNCommit),
+ cb=VerifyLand),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\""
"Version 3.22.5.1 (cherry-pick)"
- "\" origin/candidates",
+ "\" refs/remotes/origin/candidates",
""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\""
"Version 3.22.5.1 (cherry-pick)"
- "\" origin/candidates",
+ "\" refs/remotes/origin/candidates",
"hsh_to_tag"),
Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
Cmd("git push origin 3.22.5.1", ""),
@@ -1205,6 +1291,10 @@ LOG=N
MergeToBranch(TEST_CONFIG, self).Run(args)
def testReleases(self):
+ c_hash1_commit_log = """Update V8 to Version 4.2.71.
+
+Cr-Commit-Position: refs/heads/master@{#5678}
+"""
c_hash2_commit_log = """Revert something.
BUG=12345
@@ -1241,6 +1331,10 @@ git-svn-id: googlecode@123 0039-1c4b
Cr-Commit-Position: refs/heads/candidates@{#345}
"""
+ c_hash_456_commit_log = """Version 4.2.71.
+
+Cr-Commit-Position: refs/heads/4.2.71@{#1}
+"""
json_output = self.MakeEmptyTempFile()
csv_output = self.MakeEmptyTempFile()
@@ -1255,8 +1349,9 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
os.path.join(chrome_dir, "DEPS"))
WriteDEPS(567)
- def ResetVersion(minor, build, patch=0):
- return lambda: self.WriteFakeVersionFile(minor=minor,
+ def ResetVersion(major, minor, build, patch=0):
+ return lambda: self.WriteFakeVersionFile(major=major,
+ minor=minor,
build=build,
patch=patch)
@@ -1269,70 +1364,92 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
- Cmd("git branch -r", " branch-heads/3.21\n branch-heads/3.3\n"),
- Cmd("git reset --hard branch-heads/3.3", ""),
- Cmd("git log --format=%H", "hash1\nhash_234"),
- Cmd("git diff --name-only hash1 hash1^", ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nhash_234\nhash_123\nhash_345\nhash_456\n"),
+ Cmd("git describe --tags bad_tag", "3.23.42-1-deadbeef"),
+ Cmd("git describe --tags hash_234", "3.3.1.1"),
+ Cmd("git describe --tags hash_123", "3.21.2"),
+ Cmd("git describe --tags hash_345", "3.22.3"),
+ Cmd("git describe --tags hash_456", "4.2.71"),
Cmd("git diff --name-only hash_234 hash_234^", VERSION_FILE),
Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "",
- cb=ResetVersion(3, 1, 1)),
+ cb=ResetVersion(3, 3, 1, 1)),
+ Cmd("git branch -r --contains hash_234", " branch-heads/3.3\n"),
Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
Cmd("git log -1 --format=%s hash_234", ""),
Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
Cmd("git log -1 --format=%ci hash_234", "18:15"),
Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 5)),
- Cmd("git reset --hard branch-heads/3.21", ""),
- Cmd("git log --format=%H", "hash_123\nhash4\nhash5\n"),
+ cb=ResetVersion(3, 22, 5)),
Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE),
Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "",
- cb=ResetVersion(21, 2)),
+ cb=ResetVersion(3, 21, 2)),
+ Cmd("git branch -r --contains hash_123", " branch-heads/3.21\n"),
Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
Cmd("git log -1 --format=%s hash_123", ""),
Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
Cmd("git log -1 --format=%ci hash_123", "03:15"),
Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 5)),
- Cmd("git reset --hard origin/candidates", ""),
- Cmd("git log --format=%H", "hash_345\n"),
+ cb=ResetVersion(3, 22, 5)),
Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE),
Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 3)),
+ cb=ResetVersion(3, 22, 3)),
+ Cmd("git branch -r --contains hash_345", " origin/candidates\n"),
Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
Cmd("git log -1 --format=%s hash_345", ""),
Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
Cmd("git log -1 --format=%ci hash_345", ""),
Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 5)),
- Cmd("git reset --hard origin/master", ""),
+ cb=ResetVersion(3, 22, 5)),
+ Cmd("git diff --name-only hash_456 hash_456^", VERSION_FILE),
+ Cmd("git checkout -f hash_456 -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(4, 2, 71)),
+ Cmd("git branch -r --contains hash_456", " origin/4.2.71\n"),
+ Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
+ Cmd("git log -1 --format=%H 4.2.71", "hash_456"),
+ Cmd("git log -1 --format=%s hash_456", "Version 4.2.71"),
+ Cmd("git log -1 --format=%H hash_456^", "master_456"),
+ Cmd("git log -1 --format=%B master_456",
+ "Cr-Commit-Position: refs/heads/master@{#456}"),
+ Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
+ Cmd("git log -1 --format=%ci hash_456", "02:15"),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 22, 5)),
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], "",
cwd=chrome_dir),
Cmd("git fetch origin", "", cwd=chrome_v8_dir),
- Cmd("git log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\nc_hash3\n",
+ Cmd("git log --format=%H --grep=\"V8\"",
+ "c_hash0\nc_hash1\nc_hash2\nc_hash3\n",
+ cwd=chrome_dir),
+ Cmd("git diff --name-only c_hash0 c_hash0^", "", cwd=chrome_dir),
+ Cmd("git diff --name-only c_hash1 c_hash1^", "DEPS", cwd=chrome_dir),
+ Cmd("git checkout -f c_hash1 -- DEPS", "",
+ cb=ResetDEPS("hash_456"),
+ cwd=chrome_dir),
+ Cmd("git log -1 --format=%B c_hash1", c_hash1_commit_log,
cwd=chrome_dir),
- Cmd("git diff --name-only c_hash1 c_hash1^", "", cwd=chrome_dir),
Cmd("git diff --name-only c_hash2 c_hash2^", "DEPS", cwd=chrome_dir),
Cmd("git checkout -f c_hash2 -- DEPS", "",
- cb=ResetDEPS("0123456789012345678901234567890123456789"),
+ cb=ResetDEPS("hash_345"),
cwd=chrome_dir),
Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
cwd=chrome_dir),
- Cmd("git log -1 --format=%B 0123456789012345678901234567890123456789",
- self.C_V8_22624_LOG, cwd=chrome_v8_dir),
Cmd("git diff --name-only c_hash3 c_hash3^", "DEPS", cwd=chrome_dir),
- Cmd("git checkout -f c_hash3 -- DEPS", "", cb=ResetDEPS(345),
+ Cmd("git checkout -f c_hash3 -- DEPS", "", cb=ResetDEPS("deadbeef"),
cwd=chrome_dir),
Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
cwd=chrome_dir),
- Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
+ Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS("hash_567"),
cwd=chrome_dir),
Cmd("git branch -r", " weird/123\n branch-heads/7\n", cwd=chrome_dir),
- Cmd("git checkout -f branch-heads/7 -- DEPS", "", cb=ResetDEPS(345),
+ Cmd("git checkout -f branch-heads/7 -- DEPS", "",
+ cb=ResetDEPS("hash_345"),
cwd=chrome_dir),
- Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
+ Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS("hash_567"),
cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
@@ -1347,20 +1464,36 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
Releases(TEST_CONFIG, self).Run(args)
# Check expected output.
- csv = ("3.22.3,candidates,345,3456:4566,\r\n"
+ csv = ("4.2.71,4.2.71,1,5678,\r\n"
+ "3.22.3,candidates,345,4567:5677,\r\n"
"3.21.2,3.21,123,,\r\n"
"3.3.1.1,3.3,234,,abc12\r\n")
self.assertEquals(csv, FileToText(csv_output))
expected_json = [
{
+ "revision": "1",
+ "revision_git": "hash_456",
+ "master_position": "456",
+ "master_hash": "master_456",
+ "patches_merged": "",
+ "version": "4.2.71",
+ "chromium_revision": "5678",
+ "branch": "4.2.71",
+ "review_link": "",
+ "date": "02:15",
+ "chromium_branch": "",
+ # FIXME(machenbach): Fix revisions link for git.
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=1",
+ },
+ {
"revision": "345",
"revision_git": "hash_345",
- "bleeding_edge": "",
- "bleeding_edge_git": "",
+ "master_position": "",
+ "master_hash": "",
"patches_merged": "",
"version": "3.22.3",
- "chromium_revision": "3456:4566",
+ "chromium_revision": "4567:5677",
"branch": "candidates",
"review_link": "",
"date": "",
@@ -1371,8 +1504,8 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
"revision": "123",
"revision_git": "hash_123",
"patches_merged": "",
- "bleeding_edge": "",
- "bleeding_edge_git": "",
+ "master_position": "",
+ "master_hash": "",
"version": "3.21.2",
"chromium_revision": "",
"branch": "3.21",
@@ -1385,8 +1518,8 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
"revision": "234",
"revision_git": "hash_234",
"patches_merged": "abc12",
- "bleeding_edge": "",
- "bleeding_edge_git": "",
+ "master_position": "",
+ "master_hash": "",
"version": "3.3.1.1",
"chromium_revision": "",
"branch": "3.3",
@@ -1399,95 +1532,6 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
self.assertEquals(expected_json, json.loads(FileToText(json_output)))
- def _bumpUpVersion(self):
- self.WriteFakeVersionFile()
-
- def ResetVersion(minor, build, patch=0):
- return lambda: self.WriteFakeVersionFile(minor=minor,
- build=build,
- patch=patch)
-
- return [
- Cmd("git status -s -uno", ""),
- Cmd("git checkout -f master", "", cb=ResetVersion(11, 4)),
- Cmd("git pull", ""),
- Cmd("git branch", ""),
- Cmd("git checkout -f master", ""),
- Cmd("git log -1 --format=%H", "latest_hash"),
- Cmd("git diff --name-only latest_hash latest_hash^", ""),
- URL("https://v8-status.appspot.com/lkgr", "12345"),
- Cmd("git checkout -f master", ""),
- Cmd(("git log --format=%H --grep="
- "\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\""),
- "lkgr_hash"),
- Cmd("git new-branch auto-bump-up-version --upstream lkgr_hash", ""),
- Cmd("git checkout -f master", ""),
- Cmd("git branch", "auto-bump-up-version\n* master"),
- Cmd("git branch -D auto-bump-up-version", ""),
- Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
- Cmd("git checkout -f candidates", "", cb=ResetVersion(11, 5)),
- Cmd("git pull", ""),
- URL("https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is open\"}"),
- Cmd("git new-branch auto-bump-up-version --upstream master", "",
- cb=ResetVersion(11, 4)),
- Cmd("git commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n"
- "TBR=author@chromium.org\" "
- "--author \"author@chromium.org <author@chromium.org>\"", ""),
- ]
-
- def testBumpUpVersionGit(self):
- expectations = self._bumpUpVersion()
- expectations += [
- Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
- "--bypass-hooks", ""),
- Cmd("git cl land -f --bypass-hooks", ""),
- Cmd("git checkout -f master", ""),
- Cmd("git branch", "auto-bump-up-version\n* master"),
- Cmd("git branch -D auto-bump-up-version", ""),
- ]
- self.Expect(expectations)
-
- BumpUpVersion(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
-
-
- # Test that we bail out if the last change was a version change.
- def testBumpUpVersionBailout1(self):
- self._state["latest"] = "latest_hash"
-
- self.Expect([
- Cmd("git diff --name-only latest_hash latest_hash^", VERSION_FILE),
- ])
-
- self.assertEquals(0,
- self.RunStep(BumpUpVersion, LastChangeBailout, ["--dry_run"]))
-
- # Test that we bail out if the lkgr was a version change.
- def testBumpUpVersionBailout2(self):
- self._state["lkgr"] = "lkgr_hash"
-
- self.Expect([
- Cmd("git diff --name-only lkgr_hash lkgr_hash^", VERSION_FILE),
- ])
-
- self.assertEquals(0,
- self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
-
- # Test that we bail out if the last version is already newer than the lkgr's
- # version.
- def testBumpUpVersionBailout3(self):
- self._state["lkgr"] = "lkgr_hash"
- self._state["lkgr_version"] = "3.22.4.0"
- self._state["latest_version"] = "3.22.5.0"
-
- self.Expect([
- Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
- ])
-
- self.assertEquals(0,
- self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
-
-
class SystemTest(unittest.TestCase):
def testReload(self):
options = ScriptsBase(
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index a6fdf3187d..ec79cbb51d 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -66,6 +66,8 @@ SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
"arm",
"ia32",
+ "ppc",
+ "ppc64",
"mipsel",
"nacl_ia32",
"nacl_x64",
@@ -377,7 +379,8 @@ def Execute(arch, mode, args, options, suites, workspace):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
- False) # No predictable mode.
+ False, # No predictable mode.
+ False) # No no_harness mode.
# Find available test suites and read test cases from them.
variables = {
@@ -394,6 +397,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"tsan": False,
"msan": False,
"dcheck_always_on": options.dcheck_always_on,
+ "byteorder": sys.byteorder,
}
all_tests = []
num_tests = 0
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index d68d1f86bd..8627319359 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -81,17 +81,46 @@ TEST_MAP = {
}
TIMEOUT_DEFAULT = 60
-TIMEOUT_SCALEFACTOR = {"debug" : 4,
- "release" : 1 }
VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
-MODE_FLAGS = {
- "debug" : ["--nohard-abort", "--nodead-code-elimination",
- "--nofold-constants", "--enable-slow-asserts",
- "--debug-code", "--verify-heap"],
- "release" : ["--nohard-abort", "--nodead-code-elimination",
- "--nofold-constants"]}
+DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
+ "--nofold-constants", "--enable-slow-asserts",
+ "--debug-code", "--verify-heap"]
+RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
+ "--nofold-constants"]
+
+MODES = {
+ "debug": {
+ "flags": DEBUG_FLAGS,
+ "timeout_scalefactor": 4,
+ "status_mode": "debug",
+ "execution_mode": "debug",
+ "output_folder": "debug",
+ },
+ "optdebug": {
+ "flags": DEBUG_FLAGS,
+ "timeout_scalefactor": 4,
+ "status_mode": "debug",
+ "execution_mode": "debug",
+ "output_folder": "optdebug",
+ },
+ "release": {
+ "flags": RELEASE_FLAGS,
+ "timeout_scalefactor": 1,
+ "status_mode": "release",
+ "execution_mode": "release",
+ "output_folder": "release",
+ },
+ # This mode requires v8 to be compiled with dchecks and slow dchecks.
+ "tryrelease": {
+ "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
+ "timeout_scalefactor": 2,
+ "status_mode": "debug",
+ "execution_mode": "release",
+ "output_folder": "release",
+ },
+}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-queue-length=64",
@@ -109,6 +138,8 @@ SUPPORTED_ARCHS = ["android_arm",
"mips64el",
"nacl_ia32",
"nacl_x64",
+ "ppc",
+ "ppc64",
"x64",
"x32",
"arm64"]
@@ -173,6 +204,9 @@ def BuildOptions():
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default="release,debug")
+ result.add_option("--no-harness", "--noharness",
+ help="Run without test harness of a given suite",
+ default=False, action="store_true")
result.add_option("--no-i18n", "--noi18n",
help="Skip internationalization tests",
default=False, action="store_true")
@@ -272,7 +306,7 @@ def ProcessOptions(options):
options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
options.mode = options.mode.split(",")
for mode in options.mode:
- if not mode.lower() in ["debug", "release", "optdebug"]:
+ if not mode.lower() in MODES:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
@@ -454,18 +488,20 @@ def Execute(arch, mode, args, options, suites, workspace):
shell_dir = options.shell_dir
if not shell_dir:
if options.buildbot:
+ # TODO(machenbach): Get rid of different output folder location on
+ # buildbot. Currently this is capitalized Release and Debug.
shell_dir = os.path.join(workspace, options.outdir, mode)
mode = mode.lower()
else:
- shell_dir = os.path.join(workspace, options.outdir,
- "%s.%s" % (arch, mode))
+ shell_dir = os.path.join(
+ workspace,
+ options.outdir,
+ "%s.%s" % (arch, MODES[mode]["output_folder"]),
+ )
shell_dir = os.path.relpath(shell_dir)
- if mode == "optdebug":
- mode = "debug" # "optdebug" is just an alias.
-
# Populate context object.
- mode_flags = MODE_FLAGS[mode]
+ mode_flags = MODES[mode]["flags"]
timeout = options.timeout
if timeout == -1:
# Simulators are slow, therefore allow a longer default timeout.
@@ -474,13 +510,13 @@ def Execute(arch, mode, args, options, suites, workspace):
else:
timeout = TIMEOUT_DEFAULT;
- timeout *= TIMEOUT_SCALEFACTOR[mode]
+ timeout *= MODES[mode]["timeout_scalefactor"]
if options.predictable:
# Predictable mode is slower.
timeout *= 2
- ctx = context.Context(arch, mode, shell_dir,
+ ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
@@ -490,11 +526,13 @@ def Execute(arch, mode, args, options, suites, workspace):
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
- options.predictable)
+ options.predictable,
+ options.no_harness)
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
- arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el'] and \
+ arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
+ 'ppc', 'ppc64'] and \
ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
@@ -503,7 +541,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"deopt_fuzzer": False,
"gc_stress": options.gc_stress,
"isolates": options.isolates,
- "mode": mode,
+ "mode": MODES[mode]["status_mode"],
"no_i18n": options.no_i18n,
"no_snap": options.no_snap,
"simulator_run": simulator_run,
@@ -512,6 +550,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"tsan": options.tsan,
"msan": options.msan,
"dcheck_always_on": options.dcheck_always_on,
+ "byteorder": sys.byteorder,
}
all_tests = []
num_tests = 0
@@ -555,7 +594,8 @@ def Execute(arch, mode, args, options, suites, workspace):
progress_indicator, options.junitout, options.junittestsuite)
if options.json_test_results:
progress_indicator = progress.JsonTestProgressIndicator(
- progress_indicator, options.json_test_results, arch, mode)
+ progress_indicator, options.json_test_results, arch,
+ MODES[mode]["execution_mode"])
run_networked = not options.no_network
if not run_networked:
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index a313f0509f..a52fa566b8 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -53,9 +53,10 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
+for var in ["debug", "release", "big", "little",
+ "android_arm", "android_arm64", "android_ia32", "android_x87",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
- "nacl_x64", "macos", "windows", "linux"]:
+ "nacl_x64", "ppc", "ppc64", "macos", "windows", "linux", "aix"]:
VARIABLES[var] = var
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 84f07feefb..f3178d87e1 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -38,7 +38,7 @@ from ..objects import testcase
VARIANT_FLAGS = {
"default": [],
"stress": ["--stress-opt", "--always-opt"],
- "turbofan": ["--turbo-asm", "--turbo-filter=*", "--always-opt"],
+ "turbofan": ["--turbo-deoptimization", "--turbo-filter=*", "--always-opt"],
"nocrankshaft": ["--nocrankshaft"]}
FAST_VARIANT_FLAGS = [
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 7bc21b1fc0..13bd28012e 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -73,6 +73,8 @@ def GuessOS():
return 'solaris'
elif system == 'NetBSD':
return 'netbsd'
+ elif system == 'AIX':
+ return 'aix'
else:
return None
@@ -99,6 +101,8 @@ def DefaultArch():
return 'ia32'
elif machine == 'amd64':
return 'ia32'
+ elif machine == 'ppc64':
+ return 'ppc'
else:
return None
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index 937d9089f3..b76e562809 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -30,7 +30,7 @@ class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
no_sorting, rerun_failures_count, rerun_failures_max,
- predictable):
+ predictable, no_harness):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -46,16 +46,18 @@ class Context():
self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max
self.predictable = predictable
+ self.no_harness = no_harness
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
self.command_prefix, self.extra_flags, self.noi18n,
self.random_seed, self.no_sorting, self.rerun_failures_count,
- self.rerun_failures_max, self.predictable]
+ self.rerun_failures_max, self.predictable, self.no_harness]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Context(packed[0], packed[1], None, packed[2], False,
packed[3], packed[4], packed[5], packed[6], packed[7],
- packed[8], packed[9], packed[10], packed[11], packed[12])
+ packed[8], packed[9], packed[10], packed[11], packed[12],
+ packed[13])
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index 8ba3326f63..16054186af 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -75,6 +75,7 @@ var tickProcessor = new TickProcessor(
snapshotLogProcessor,
params.distortion,
params.range,
- sourceMap);
+ sourceMap,
+ params.timedRange);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index d54471795f..05f42076bd 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -154,7 +154,8 @@ function TickProcessor(
snapshotLogProcessor,
distortion,
range,
- sourceMap) {
+ sourceMap,
+ timedRange) {
LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
@@ -187,10 +188,12 @@ function TickProcessor(
'function-move': null,
'function-delete': null,
'heap-sample-item': null,
+ 'current-time': null, // Handled specially, not parsed.
// Obsolete row types.
'code-allocate': null,
'begin-code-region': null,
- 'end-code-region': null });
+ 'end-code-region': null },
+ timedRange);
this.cppEntriesProvider_ = cppEntriesProvider;
this.callGraphSize_ = callGraphSize;
@@ -292,7 +295,7 @@ TickProcessor.prototype.isCppCode = function(name) {
TickProcessor.prototype.isJsCode = function(name) {
- return !(name in this.codeTypes_);
+ return name !== "UNKNOWN" && !(name in this.codeTypes_);
};
@@ -875,7 +878,9 @@ function ArgumentsProcessor(args) {
'--distortion': ['distortion', 0,
'Specify the logging overhead in picoseconds'],
'--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output']
+ 'Specify the source map that should be used for output'],
+ '--timed-range': ['timedRange', true,
+ 'Ignore ticks before first and after last Date.now() call']
};
this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
@@ -896,7 +901,8 @@ ArgumentsProcessor.DEFAULTS = {
targetRootFS: '',
nm: 'nm',
range: 'auto,auto',
- distortion: 0
+ distortion: 0,
+ timedRange: false
};
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 5e3e841f8d..7a69393705 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -44,8 +44,6 @@ INSTANCE_TYPES = {
90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
0: "INTERNALIZED_STRING_TYPE",
4: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
- 1: "CONS_INTERNALIZED_STRING_TYPE",
- 5: "CONS_ONE_BYTE_INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
6: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
@@ -59,172 +57,182 @@ INSTANCE_TYPES = {
132: "CELL_TYPE",
133: "PROPERTY_CELL_TYPE",
134: "HEAP_NUMBER_TYPE",
- 135: "FOREIGN_TYPE",
- 136: "BYTE_ARRAY_TYPE",
- 137: "FREE_SPACE_TYPE",
- 138: "EXTERNAL_INT8_ARRAY_TYPE",
- 139: "EXTERNAL_UINT8_ARRAY_TYPE",
- 140: "EXTERNAL_INT16_ARRAY_TYPE",
- 141: "EXTERNAL_UINT16_ARRAY_TYPE",
- 142: "EXTERNAL_INT32_ARRAY_TYPE",
- 143: "EXTERNAL_UINT32_ARRAY_TYPE",
- 144: "EXTERNAL_FLOAT32_ARRAY_TYPE",
- 145: "EXTERNAL_FLOAT64_ARRAY_TYPE",
- 146: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
- 147: "FIXED_INT8_ARRAY_TYPE",
- 148: "FIXED_UINT8_ARRAY_TYPE",
- 149: "FIXED_INT16_ARRAY_TYPE",
- 150: "FIXED_UINT16_ARRAY_TYPE",
- 151: "FIXED_INT32_ARRAY_TYPE",
- 152: "FIXED_UINT32_ARRAY_TYPE",
- 153: "FIXED_FLOAT32_ARRAY_TYPE",
- 154: "FIXED_FLOAT64_ARRAY_TYPE",
- 155: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 157: "FILLER_TYPE",
- 158: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 159: "DECLARED_ACCESSOR_INFO_TYPE",
- 160: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 161: "ACCESSOR_PAIR_TYPE",
- 162: "ACCESS_CHECK_INFO_TYPE",
- 163: "INTERCEPTOR_INFO_TYPE",
- 164: "CALL_HANDLER_INFO_TYPE",
- 165: "FUNCTION_TEMPLATE_INFO_TYPE",
- 166: "OBJECT_TEMPLATE_INFO_TYPE",
- 167: "SIGNATURE_INFO_TYPE",
- 168: "TYPE_SWITCH_INFO_TYPE",
- 170: "ALLOCATION_MEMENTO_TYPE",
- 169: "ALLOCATION_SITE_TYPE",
- 171: "SCRIPT_TYPE",
- 172: "CODE_CACHE_TYPE",
- 173: "POLYMORPHIC_CODE_CACHE_TYPE",
- 174: "TYPE_FEEDBACK_INFO_TYPE",
- 175: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 176: "BOX_TYPE",
- 179: "FIXED_ARRAY_TYPE",
- 156: "FIXED_DOUBLE_ARRAY_TYPE",
- 180: "CONSTANT_POOL_ARRAY_TYPE",
- 181: "SHARED_FUNCTION_INFO_TYPE",
- 182: "JS_MESSAGE_OBJECT_TYPE",
- 185: "JS_VALUE_TYPE",
- 186: "JS_DATE_TYPE",
- 187: "JS_OBJECT_TYPE",
- 188: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 189: "JS_GENERATOR_OBJECT_TYPE",
- 190: "JS_MODULE_TYPE",
- 191: "JS_GLOBAL_OBJECT_TYPE",
- 192: "JS_BUILTINS_OBJECT_TYPE",
- 193: "JS_GLOBAL_PROXY_TYPE",
- 194: "JS_ARRAY_TYPE",
- 195: "JS_ARRAY_BUFFER_TYPE",
- 196: "JS_TYPED_ARRAY_TYPE",
- 197: "JS_DATA_VIEW_TYPE",
- 184: "JS_PROXY_TYPE",
- 198: "JS_SET_TYPE",
- 199: "JS_MAP_TYPE",
- 200: "JS_WEAK_MAP_TYPE",
- 201: "JS_WEAK_SET_TYPE",
- 202: "JS_REGEXP_TYPE",
- 203: "JS_FUNCTION_TYPE",
- 183: "JS_FUNCTION_PROXY_TYPE",
- 177: "DEBUG_INFO_TYPE",
- 178: "BREAK_POINT_INFO_TYPE",
+ 135: "MUTABLE_HEAP_NUMBER_TYPE",
+ 136: "FOREIGN_TYPE",
+ 137: "BYTE_ARRAY_TYPE",
+ 138: "FREE_SPACE_TYPE",
+ 139: "EXTERNAL_INT8_ARRAY_TYPE",
+ 140: "EXTERNAL_UINT8_ARRAY_TYPE",
+ 141: "EXTERNAL_INT16_ARRAY_TYPE",
+ 142: "EXTERNAL_UINT16_ARRAY_TYPE",
+ 143: "EXTERNAL_INT32_ARRAY_TYPE",
+ 144: "EXTERNAL_UINT32_ARRAY_TYPE",
+ 145: "EXTERNAL_FLOAT32_ARRAY_TYPE",
+ 146: "EXTERNAL_FLOAT64_ARRAY_TYPE",
+ 147: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
+ 148: "FIXED_INT8_ARRAY_TYPE",
+ 149: "FIXED_UINT8_ARRAY_TYPE",
+ 150: "FIXED_INT16_ARRAY_TYPE",
+ 151: "FIXED_UINT16_ARRAY_TYPE",
+ 152: "FIXED_INT32_ARRAY_TYPE",
+ 153: "FIXED_UINT32_ARRAY_TYPE",
+ 154: "FIXED_FLOAT32_ARRAY_TYPE",
+ 155: "FIXED_FLOAT64_ARRAY_TYPE",
+ 156: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 158: "FILLER_TYPE",
+ 159: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 160: "DECLARED_ACCESSOR_INFO_TYPE",
+ 161: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 162: "ACCESSOR_PAIR_TYPE",
+ 163: "ACCESS_CHECK_INFO_TYPE",
+ 164: "INTERCEPTOR_INFO_TYPE",
+ 165: "CALL_HANDLER_INFO_TYPE",
+ 166: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 167: "OBJECT_TEMPLATE_INFO_TYPE",
+ 168: "SIGNATURE_INFO_TYPE",
+ 169: "TYPE_SWITCH_INFO_TYPE",
+ 171: "ALLOCATION_MEMENTO_TYPE",
+ 170: "ALLOCATION_SITE_TYPE",
+ 172: "SCRIPT_TYPE",
+ 173: "CODE_CACHE_TYPE",
+ 174: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 175: "TYPE_FEEDBACK_INFO_TYPE",
+ 176: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 177: "BOX_TYPE",
+ 180: "FIXED_ARRAY_TYPE",
+ 157: "FIXED_DOUBLE_ARRAY_TYPE",
+ 181: "CONSTANT_POOL_ARRAY_TYPE",
+ 182: "SHARED_FUNCTION_INFO_TYPE",
+ 183: "WEAK_CELL_TYPE",
+ 187: "JS_MESSAGE_OBJECT_TYPE",
+ 186: "JS_VALUE_TYPE",
+ 188: "JS_DATE_TYPE",
+ 189: "JS_OBJECT_TYPE",
+ 190: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 191: "JS_GENERATOR_OBJECT_TYPE",
+ 192: "JS_MODULE_TYPE",
+ 193: "JS_GLOBAL_OBJECT_TYPE",
+ 194: "JS_BUILTINS_OBJECT_TYPE",
+ 195: "JS_GLOBAL_PROXY_TYPE",
+ 196: "JS_ARRAY_TYPE",
+ 197: "JS_ARRAY_BUFFER_TYPE",
+ 198: "JS_TYPED_ARRAY_TYPE",
+ 199: "JS_DATA_VIEW_TYPE",
+ 185: "JS_PROXY_TYPE",
+ 200: "JS_SET_TYPE",
+ 201: "JS_MAP_TYPE",
+ 202: "JS_SET_ITERATOR_TYPE",
+ 203: "JS_MAP_ITERATOR_TYPE",
+ 204: "JS_WEAK_MAP_TYPE",
+ 205: "JS_WEAK_SET_TYPE",
+ 206: "JS_REGEXP_TYPE",
+ 207: "JS_FUNCTION_TYPE",
+ 184: "JS_FUNCTION_PROXY_TYPE",
+ 178: "DEBUG_INFO_TYPE",
+ 179: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- 0x08081: (136, "ByteArrayMap"),
+ 0x08081: (137, "ByteArrayMap"),
0x080a9: (129, "MetaMap"),
- 0x080d1: (131, "OddballMap"),
- 0x080f9: (4, "OneByteInternalizedStringMap"),
- 0x08121: (179, "FixedArrayMap"),
- 0x08149: (134, "HeapNumberMap"),
- 0x08171: (137, "FreeSpaceMap"),
- 0x08199: (157, "OnePointerFillerMap"),
- 0x081c1: (157, "TwoPointerFillerMap"),
- 0x081e9: (132, "CellMap"),
- 0x08211: (133, "GlobalPropertyCellMap"),
- 0x08239: (181, "SharedFunctionInfoMap"),
- 0x08261: (179, "NativeContextMap"),
- 0x08289: (130, "CodeMap"),
- 0x082b1: (179, "ScopeInfoMap"),
- 0x082d9: (179, "FixedCOWArrayMap"),
- 0x08301: (156, "FixedDoubleArrayMap"),
- 0x08329: (180, "ConstantPoolArrayMap"),
- 0x08351: (179, "HashTableMap"),
- 0x08379: (128, "SymbolMap"),
- 0x083a1: (64, "StringMap"),
- 0x083c9: (68, "OneByteStringMap"),
- 0x083f1: (65, "ConsStringMap"),
- 0x08419: (69, "ConsOneByteStringMap"),
- 0x08441: (67, "SlicedStringMap"),
- 0x08469: (71, "SlicedOneByteStringMap"),
- 0x08491: (66, "ExternalStringMap"),
- 0x084b9: (74, "ExternalStringWithOneByteDataMap"),
- 0x084e1: (70, "ExternalOneByteStringMap"),
- 0x08509: (82, "ShortExternalStringMap"),
- 0x08531: (90, "ShortExternalStringWithOneByteDataMap"),
- 0x08559: (0, "InternalizedStringMap"),
- 0x08581: (1, "ConsInternalizedStringMap"),
- 0x085a9: (5, "ConsOneByteInternalizedStringMap"),
- 0x085d1: (2, "ExternalInternalizedStringMap"),
- 0x085f9: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x08621: (6, "ExternalOneByteInternalizedStringMap"),
- 0x08649: (18, "ShortExternalInternalizedStringMap"),
- 0x08671: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08699: (22, "ShortExternalOneByteInternalizedStringMap"),
- 0x086c1: (86, "ShortExternalOneByteStringMap"),
- 0x086e9: (64, "UndetectableStringMap"),
- 0x08711: (68, "UndetectableOneByteStringMap"),
- 0x08739: (138, "ExternalInt8ArrayMap"),
- 0x08761: (139, "ExternalUint8ArrayMap"),
- 0x08789: (140, "ExternalInt16ArrayMap"),
- 0x087b1: (141, "ExternalUint16ArrayMap"),
- 0x087d9: (142, "ExternalInt32ArrayMap"),
- 0x08801: (143, "ExternalUint32ArrayMap"),
- 0x08829: (144, "ExternalFloat32ArrayMap"),
- 0x08851: (145, "ExternalFloat64ArrayMap"),
- 0x08879: (146, "ExternalUint8ClampedArrayMap"),
- 0x088a1: (148, "FixedUint8ArrayMap"),
- 0x088c9: (147, "FixedInt8ArrayMap"),
- 0x088f1: (150, "FixedUint16ArrayMap"),
- 0x08919: (149, "FixedInt16ArrayMap"),
- 0x08941: (152, "FixedUint32ArrayMap"),
- 0x08969: (151, "FixedInt32ArrayMap"),
- 0x08991: (153, "FixedFloat32ArrayMap"),
- 0x089b9: (154, "FixedFloat64ArrayMap"),
- 0x089e1: (155, "FixedUint8ClampedArrayMap"),
- 0x08a09: (179, "NonStrictArgumentsElementsMap"),
- 0x08a31: (179, "FunctionContextMap"),
- 0x08a59: (179, "CatchContextMap"),
- 0x08a81: (179, "WithContextMap"),
- 0x08aa9: (179, "BlockContextMap"),
- 0x08ad1: (179, "ModuleContextMap"),
- 0x08af9: (179, "GlobalContextMap"),
- 0x08b21: (182, "JSMessageObjectMap"),
- 0x08b49: (135, "ForeignMap"),
- 0x08b71: (187, "NeanderMap"),
- 0x08b99: (170, "AllocationMementoMap"),
- 0x08bc1: (169, "AllocationSiteMap"),
- 0x08be9: (173, "PolymorphicCodeCacheMap"),
- 0x08c11: (171, "ScriptMap"),
- 0x08c61: (187, "ExternalMap"),
- 0x08cb1: (176, "BoxMap"),
- 0x08cd9: (158, "DeclaredAccessorDescriptorMap"),
- 0x08d01: (159, "DeclaredAccessorInfoMap"),
- 0x08d29: (160, "ExecutableAccessorInfoMap"),
- 0x08d51: (161, "AccessorPairMap"),
- 0x08d79: (162, "AccessCheckInfoMap"),
- 0x08da1: (163, "InterceptorInfoMap"),
- 0x08dc9: (164, "CallHandlerInfoMap"),
- 0x08df1: (165, "FunctionTemplateInfoMap"),
- 0x08e19: (166, "ObjectTemplateInfoMap"),
- 0x08e41: (167, "SignatureInfoMap"),
- 0x08e69: (168, "TypeSwitchInfoMap"),
- 0x08e91: (172, "CodeCacheMap"),
- 0x08eb9: (174, "TypeFeedbackInfoMap"),
- 0x08ee1: (175, "AliasedArgumentsEntryMap"),
- 0x08f09: (177, "DebugInfoMap"),
- 0x08f31: (178, "BreakPointInfoMap"),
+ 0x080d1: (131, "NullMap"),
+ 0x080f9: (131, "UndefinedMap"),
+ 0x08121: (180, "FixedArrayMap"),
+ 0x08149: (4, "OneByteInternalizedStringMap"),
+ 0x08171: (134, "HeapNumberMap"),
+ 0x08199: (138, "FreeSpaceMap"),
+ 0x081c1: (158, "OnePointerFillerMap"),
+ 0x081e9: (158, "TwoPointerFillerMap"),
+ 0x08211: (131, "TheHoleMap"),
+ 0x08239: (131, "BooleanMap"),
+ 0x08261: (131, "UninitializedMap"),
+ 0x08289: (131, "ExceptionMap"),
+ 0x082b1: (132, "CellMap"),
+ 0x082d9: (133, "GlobalPropertyCellMap"),
+ 0x08301: (182, "SharedFunctionInfoMap"),
+ 0x08329: (135, "MutableHeapNumberMap"),
+ 0x08351: (180, "NativeContextMap"),
+ 0x08379: (130, "CodeMap"),
+ 0x083a1: (180, "ScopeInfoMap"),
+ 0x083c9: (180, "FixedCOWArrayMap"),
+ 0x083f1: (157, "FixedDoubleArrayMap"),
+ 0x08419: (181, "ConstantPoolArrayMap"),
+ 0x08441: (183, "WeakCellMap"),
+ 0x08469: (131, "NoInterceptorResultSentinelMap"),
+ 0x08491: (180, "HashTableMap"),
+ 0x084b9: (180, "OrderedHashTableMap"),
+ 0x084e1: (131, "ArgumentsMarkerMap"),
+ 0x08509: (131, "TerminationExceptionMap"),
+ 0x08531: (128, "SymbolMap"),
+ 0x08559: (64, "StringMap"),
+ 0x08581: (68, "OneByteStringMap"),
+ 0x085a9: (65, "ConsStringMap"),
+ 0x085d1: (69, "ConsOneByteStringMap"),
+ 0x085f9: (67, "SlicedStringMap"),
+ 0x08621: (71, "SlicedOneByteStringMap"),
+ 0x08649: (66, "ExternalStringMap"),
+ 0x08671: (74, "ExternalStringWithOneByteDataMap"),
+ 0x08699: (70, "ExternalOneByteStringMap"),
+ 0x086c1: (70, "NativeSourceStringMap"),
+ 0x086e9: (82, "ShortExternalStringMap"),
+ 0x08711: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x08739: (0, "InternalizedStringMap"),
+ 0x08761: (2, "ExternalInternalizedStringMap"),
+ 0x08789: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x087b1: (6, "ExternalOneByteInternalizedStringMap"),
+ 0x087d9: (18, "ShortExternalInternalizedStringMap"),
+ 0x08801: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08829: (22, "ShortExternalOneByteInternalizedStringMap"),
+ 0x08851: (86, "ShortExternalOneByteStringMap"),
+ 0x08879: (139, "ExternalInt8ArrayMap"),
+ 0x088a1: (140, "ExternalUint8ArrayMap"),
+ 0x088c9: (141, "ExternalInt16ArrayMap"),
+ 0x088f1: (142, "ExternalUint16ArrayMap"),
+ 0x08919: (143, "ExternalInt32ArrayMap"),
+ 0x08941: (144, "ExternalUint32ArrayMap"),
+ 0x08969: (145, "ExternalFloat32ArrayMap"),
+ 0x08991: (146, "ExternalFloat64ArrayMap"),
+ 0x089b9: (147, "ExternalUint8ClampedArrayMap"),
+ 0x089e1: (149, "FixedUint8ArrayMap"),
+ 0x08a09: (148, "FixedInt8ArrayMap"),
+ 0x08a31: (151, "FixedUint16ArrayMap"),
+ 0x08a59: (150, "FixedInt16ArrayMap"),
+ 0x08a81: (153, "FixedUint32ArrayMap"),
+ 0x08aa9: (152, "FixedInt32ArrayMap"),
+ 0x08ad1: (154, "FixedFloat32ArrayMap"),
+ 0x08af9: (155, "FixedFloat64ArrayMap"),
+ 0x08b21: (156, "FixedUint8ClampedArrayMap"),
+ 0x08b49: (180, "SloppyArgumentsElementsMap"),
+ 0x08b71: (180, "FunctionContextMap"),
+ 0x08b99: (180, "CatchContextMap"),
+ 0x08bc1: (180, "WithContextMap"),
+ 0x08be9: (180, "BlockContextMap"),
+ 0x08c11: (180, "ModuleContextMap"),
+ 0x08c39: (180, "ScriptContextMap"),
+ 0x08c61: (180, "ScriptContextTableMap"),
+ 0x08c89: (187, "JSMessageObjectMap"),
+ 0x08cb1: (136, "ForeignMap"),
+ 0x08cd9: (189, "NeanderMap"),
+ 0x08d01: (170, "AllocationSiteMap"),
+ 0x08d29: (171, "AllocationMementoMap"),
+ 0x08d51: (174, "PolymorphicCodeCacheMap"),
+ 0x08d79: (172, "ScriptMap"),
+ 0x08dc9: (189, "ExternalMap"),
+ 0x08f09: (177, "BoxMap"),
+ 0x08f31: (161, "ExecutableAccessorInfoMap"),
+ 0x08f59: (162, "AccessorPairMap"),
+ 0x08f81: (163, "AccessCheckInfoMap"),
+ 0x08fa9: (164, "InterceptorInfoMap"),
+ 0x08fd1: (165, "CallHandlerInfoMap"),
+ 0x08ff9: (166, "FunctionTemplateInfoMap"),
+ 0x09021: (167, "ObjectTemplateInfoMap"),
+ 0x09049: (169, "TypeSwitchInfoMap"),
+ 0x09071: (173, "CodeCacheMap"),
+ 0x09099: (175, "TypeFeedbackInfoMap"),
+ 0x090c1: (176, "AliasedArgumentsEntryMap"),
+ 0x090e9: (178, "DebugInfoMap"),
+ 0x09111: (179, "BreakPointInfoMap"),
}
# List of known V8 objects.
@@ -235,47 +243,51 @@ KNOWN_OBJECTS = {
("OLD_POINTER_SPACE", 0x080b1): "TrueValue",
("OLD_POINTER_SPACE", 0x080c1): "FalseValue",
("OLD_POINTER_SPACE", 0x080d1): "UninitializedValue",
- ("OLD_POINTER_SPACE", 0x080e1): "NoInterceptorResultSentinel",
- ("OLD_POINTER_SPACE", 0x080f1): "ArgumentsMarker",
- ("OLD_POINTER_SPACE", 0x08101): "NumberStringCache",
- ("OLD_POINTER_SPACE", 0x08909): "SingleCharacterStringCache",
- ("OLD_POINTER_SPACE", 0x08d11): "StringSplitCache",
- ("OLD_POINTER_SPACE", 0x09119): "RegExpMultipleCache",
- ("OLD_POINTER_SPACE", 0x09521): "TerminationException",
- ("OLD_POINTER_SPACE", 0x09531): "MessageListeners",
- ("OLD_POINTER_SPACE", 0x0954d): "CodeStubs",
- ("OLD_POINTER_SPACE", 0x0ca65): "MegamorphicSymbol",
- ("OLD_POINTER_SPACE", 0x0ca75): "UninitializedSymbol",
- ("OLD_POINTER_SPACE", 0x10ae9): "NonMonomorphicCache",
- ("OLD_POINTER_SPACE", 0x110fd): "PolymorphicCodeCache",
- ("OLD_POINTER_SPACE", 0x11105): "NativesSourceCache",
- ("OLD_POINTER_SPACE", 0x11155): "EmptyScript",
- ("OLD_POINTER_SPACE", 0x11189): "IntrinsicFunctionNames",
- ("OLD_POINTER_SPACE", 0x141a5): "ObservationState",
- ("OLD_POINTER_SPACE", 0x141b1): "FrozenSymbol",
- ("OLD_POINTER_SPACE", 0x141c1): "NonExistentSymbol",
- ("OLD_POINTER_SPACE", 0x141d1): "ElementsTransitionSymbol",
- ("OLD_POINTER_SPACE", 0x141e1): "EmptySlowElementDictionary",
- ("OLD_POINTER_SPACE", 0x1437d): "ObservedSymbol",
- ("OLD_POINTER_SPACE", 0x1438d): "AllocationSitesScratchpad",
- ("OLD_POINTER_SPACE", 0x14795): "MicrotaskState",
- ("OLD_POINTER_SPACE", 0x36241): "StringTable",
- ("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
- ("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
+ ("OLD_POINTER_SPACE", 0x080e1): "Exception",
+ ("OLD_POINTER_SPACE", 0x080f1): "NoInterceptorResultSentinel",
+ ("OLD_POINTER_SPACE", 0x08101): "ArgumentsMarker",
+ ("OLD_POINTER_SPACE", 0x08111): "NumberStringCache",
+ ("OLD_POINTER_SPACE", 0x08919): "SingleCharacterStringCache",
+ ("OLD_POINTER_SPACE", 0x08d21): "StringSplitCache",
+ ("OLD_POINTER_SPACE", 0x09129): "RegExpMultipleCache",
+ ("OLD_POINTER_SPACE", 0x09531): "TerminationException",
+ ("OLD_POINTER_SPACE", 0x09541): "MessageListeners",
+ ("OLD_POINTER_SPACE", 0x0955d): "CodeStubs",
+ ("OLD_POINTER_SPACE", 0x0f555): "NonMonomorphicCache",
+ ("OLD_POINTER_SPACE", 0x0fb69): "PolymorphicCodeCache",
+ ("OLD_POINTER_SPACE", 0x0fb71): "NativesSourceCache",
+ ("OLD_POINTER_SPACE", 0x0fbe1): "EmptyScript",
+ ("OLD_POINTER_SPACE", 0x0fc1d): "IntrinsicFunctionNames",
+ ("OLD_POINTER_SPACE", 0x15c39): "ObservationState",
+ ("OLD_POINTER_SPACE", 0x15c45): "SymbolRegistry",
+ ("OLD_POINTER_SPACE", 0x16601): "EmptySlowElementDictionary",
+ ("OLD_POINTER_SPACE", 0x1679d): "AllocationSitesScratchpad",
+ ("OLD_POINTER_SPACE", 0x43e61): "StringTable",
+ ("OLD_DATA_SPACE", 0x08081): "EmptyDescriptorArray",
+ ("OLD_DATA_SPACE", 0x08089): "EmptyFixedArray",
("OLD_DATA_SPACE", 0x080a9): "NanValue",
- ("OLD_DATA_SPACE", 0x08141): "EmptyByteArray",
- ("OLD_DATA_SPACE", 0x08149): "EmptyConstantPoolArray",
- ("OLD_DATA_SPACE", 0x0828d): "EmptyExternalInt8Array",
- ("OLD_DATA_SPACE", 0x08299): "EmptyExternalUint8Array",
- ("OLD_DATA_SPACE", 0x082a5): "EmptyExternalInt16Array",
- ("OLD_DATA_SPACE", 0x082b1): "EmptyExternalUint16Array",
- ("OLD_DATA_SPACE", 0x082bd): "EmptyExternalInt32Array",
- ("OLD_DATA_SPACE", 0x082c9): "EmptyExternalUint32Array",
- ("OLD_DATA_SPACE", 0x082d5): "EmptyExternalFloat32Array",
- ("OLD_DATA_SPACE", 0x082e1): "EmptyExternalFloat64Array",
- ("OLD_DATA_SPACE", 0x082ed): "EmptyExternalUint8ClampedArray",
- ("OLD_DATA_SPACE", 0x082f9): "InfinityValue",
- ("OLD_DATA_SPACE", 0x08305): "MinusZeroValue",
- ("CODE_SPACE", 0x138e1): "JsConstructEntryCode",
- ("CODE_SPACE", 0x21361): "JsEntryCode",
+ ("OLD_DATA_SPACE", 0x08159): "EmptyByteArray",
+ ("OLD_DATA_SPACE", 0x08161): "EmptyConstantPoolArray",
+ ("OLD_DATA_SPACE", 0x08241): "EmptyExternalInt8Array",
+ ("OLD_DATA_SPACE", 0x0824d): "EmptyExternalUint8Array",
+ ("OLD_DATA_SPACE", 0x08259): "EmptyExternalInt16Array",
+ ("OLD_DATA_SPACE", 0x08265): "EmptyExternalUint16Array",
+ ("OLD_DATA_SPACE", 0x08271): "EmptyExternalInt32Array",
+ ("OLD_DATA_SPACE", 0x0827d): "EmptyExternalUint32Array",
+ ("OLD_DATA_SPACE", 0x08289): "EmptyExternalFloat32Array",
+ ("OLD_DATA_SPACE", 0x08295): "EmptyExternalFloat64Array",
+ ("OLD_DATA_SPACE", 0x082a1): "EmptyExternalUint8ClampedArray",
+ ("OLD_DATA_SPACE", 0x082ad): "EmptyFixedUint8Array",
+ ("OLD_DATA_SPACE", 0x082b5): "EmptyFixedInt8Array",
+ ("OLD_DATA_SPACE", 0x082bd): "EmptyFixedUint16Array",
+ ("OLD_DATA_SPACE", 0x082c5): "EmptyFixedInt16Array",
+ ("OLD_DATA_SPACE", 0x082cd): "EmptyFixedUint32Array",
+ ("OLD_DATA_SPACE", 0x082d5): "EmptyFixedInt32Array",
+ ("OLD_DATA_SPACE", 0x082dd): "EmptyFixedFloat32Array",
+ ("OLD_DATA_SPACE", 0x082e5): "EmptyFixedFloat64Array",
+ ("OLD_DATA_SPACE", 0x082ed): "EmptyFixedUint8ClampedArray",
+ ("OLD_DATA_SPACE", 0x082f5): "InfinityValue",
+ ("OLD_DATA_SPACE", 0x08301): "MinusZeroValue",
+ ("CODE_SPACE", 0x15fa1): "JsEntryCode",
+ ("CODE_SPACE", 0x243c1): "JsConstructEntryCode",
}
diff --git a/deps/v8/tools/vim/ninja-build.vim b/deps/v8/tools/vim/ninja-build.vim
new file mode 100644
index 0000000000..3e9b8948ca
--- /dev/null
+++ b/deps/v8/tools/vim/ninja-build.vim
@@ -0,0 +1,119 @@
+" Copyright (c) 2015 the V8 project authors. All rights reserved.
+" Use of this source code is governed by a BSD-style license that can be
+" found in the LICENSE file.
+"
+" Adds a "Compile this file" function, using ninja. On Mac, binds Cmd-k to
+" this command. On Windows, Ctrl-F7 (which is the same as the VS default).
+" On Linux, <Leader>o, which is \o by default ("o"=creates .o files)
+"
+" Adds a "Build this target" function, using ninja. This is not bound
+" to any key by default, but can be used via the :CrBuild command.
+" It builds 'd8' by default, but :CrBuild target1 target2 etc works as well,
+" i.e. :CrBuild all or :CrBuild d8 cctest unittests.
+"
+" Requires that gyp has already generated build.ninja files, and that ninja is
+" in your path (which it is automatically if depot_tools is in your path).
+" Bumps the number of parallel jobs in ninja automatically if goma is
+" detected.
+"
+" Add the following to your .vimrc file:
+" so /path/to/src/tools/vim/ninja-build.vim
+
+python << endpython
+import os
+import vim
+
+
+def path_to_current_buffer():
+ """Returns the absolute path of the current buffer."""
+ return vim.current.buffer.name
+
+
+def path_to_source_root():
+ """Returns the absolute path to the V8 source root."""
+ candidate = os.path.dirname(path_to_current_buffer())
+ # This is a list of files that need to identify the src directory. The shorter
+ # it is, the more likely it's wrong. The longer it is, the more likely it is to
+ # break when we rename directories.
+ fingerprints = ['.git', 'build', 'include', 'samples', 'src', 'testing',
+ 'third_party', 'tools']
+ while candidate and not all(
+ [os.path.isdir(os.path.join(candidate, fp)) for fp in fingerprints]):
+ candidate = os.path.dirname(candidate)
+ return candidate
+
+
+def path_to_build_dir(configuration):
+ """Returns <v8_root>/<output_dir>/(Release|Debug)."""
+
+ v8_root = path_to_source_root()
+ sys.path.append(os.path.join(v8_root, 'tools', 'ninja'))
+ from ninja_output import GetNinjaOutputDirectory
+ return GetNinjaOutputDirectory(v8_root, configuration)
+
+
+def compute_ninja_command_for_targets(targets='', configuration=None):
+ flags = []
+ if "use_goma=1" in os.getenv('GYP_DEFINES', '').split(' '):
+ flags = ['-j', '512']
+ build_dir = path_to_build_dir(configuration);
+ build_cmd = ' '.join(['ninja'] + flags + ['-C', build_dir, targets])
+ vim.command('return "%s"' % build_cmd)
+
+
+def compute_ninja_command_for_current_buffer(configuration=None):
+ """Returns the shell command to compile the file in the current buffer."""
+ build_dir = path_to_build_dir(configuration)
+
+ # ninja needs filepaths for the ^ syntax to be relative to the
+ # build directory.
+ file_to_build = path_to_current_buffer()
+ file_to_build = os.path.relpath(file_to_build, build_dir) + '^'
+ if sys.platform == 'win32':
+ # Escape \ for Vim, and ^ for both Vim and shell.
+ file_to_build = file_to_build.replace('\\', '\\\\').replace('^', '^^^^')
+ compute_ninja_command_for_targets(file_to_build, configuration)
+endpython
+
+fun! s:MakeWithCustomCommand(build_cmd)
+ let l:oldmakepgr = &makeprg
+ let &makeprg=a:build_cmd
+ silent make | cwindow
+ if !has('gui_running')
+ redraw!
+ endif
+ let &makeprg = l:oldmakepgr
+endfun
+
+fun! s:NinjaCommandForCurrentBuffer()
+ python compute_ninja_command_for_current_buffer()
+endfun
+
+fun! s:NinjaCommandForTargets(targets)
+ python compute_ninja_command_for_targets(vim.eval('a:targets'))
+endfun
+
+fun! CrCompileFile()
+ call s:MakeWithCustomCommand(s:NinjaCommandForCurrentBuffer())
+endfun
+
+fun! CrBuild(...)
+ let l:targets = a:0 > 0 ? join(a:000, ' ') : ''
+ if (l:targets !~ '\i')
+ let l:targets = 'd8'
+ endif
+ call s:MakeWithCustomCommand(s:NinjaCommandForTargets(l:targets))
+endfun
+
+command! CrCompileFile call CrCompileFile()
+command! -nargs=* CrBuild call CrBuild(<q-args>)
+
+if has('mac')
+ map <D-k> :CrCompileFile<cr>
+ imap <D-k> <esc>:CrCompileFile<cr>
+elseif has('win32')
+ map <C-F7> :CrCompileFile<cr>
+ imap <C-F7> <esc>:CrCompileFile<cr>
+elseif has('unix')
+ map <Leader>o :CrCompileFile<cr>
+endif
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 657e68f42e..5a830c0e12 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -2,7 +2,7 @@ You can modify this file to create no-op changelists.
Try to write something funny. And please don't add trailing whitespace.
-A Smi walks into a bar and says:
+A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up...........
+The Smi looked at them when a crazy v8-autoroll account showed up.